Dataset rows (viewer export). Column schema:

| column | type | range |
|---|---|---|
| code | string | 86 – 54.5k chars |
| code_codestyle | int64 | 0 – 371 |
| style_context | string | 87 – 49.2k chars |
| style_context_codestyle | int64 | 0 – 349 |
| label | int64 | 0 – 1 |

Each row below is a `code` sample followed by its `code_codestyle` id, then a `style_context` sample, its `style_context_codestyle` id, and the `label`.

from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class ImageProcessor(BaseImageProcessor):
    """Resize / center-crop / rescale / normalize pipeline with semantic-segmentation post-processing.

    The original class name was lost in the dataset's obfuscation; `ImageProcessor` and the
    parameter names below are reconstructed from how the values are used.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps.
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
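
A minimal smoke test of the pipeline above (editor's sketch, not from the source: it assumes this module lives inside transformers, where `BaseImageProcessor` subclasses run `preprocess`, and `ImageProcessor` is the reconstructed placeholder name used above):

if __name__ == "__main__":
    # Editor's sketch: a random HWC uint8 image goes through resize (shortest edge 256),
    # center crop (224x224), rescale and normalize, then is stacked channel-first.
    dummy = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)
    processor = ImageProcessor()
    batch = processor.preprocess(images=dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)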
[code_codestyle: 305]
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    # Class and method names reconstructed from the obfuscated source.

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images from random CHW uint8 arrays (moved to HWC for PIL)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
[style_context_codestyle: 305 | label: 1]
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    # Name reconstructed from the `model_type = "trocr"` attribute in the obfuscated source.
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50_265,
        d_model=1_024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4_096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
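
A short usage sketch (editor's addition, not from the source; assumes the usual transformers import path):

# Editor's sketch:
#   from transformers import TrOCRConfig
#   config = TrOCRConfig(decoder_layers=6)   # any keyword above can be overridden
#   config.hidden_size                        # -> 1024, resolved to `d_model` via attribute_map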
[code_codestyle: 305]
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
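
How this lazy-import pattern behaves (editor's note, not from the source): nothing heavy loads at import time; submodules are only imported when one of their attributes is first touched.

# Editor's sketch:
#   import transformers.models.dpt as dpt   # cheap: only the _LazyModule shim loads
#   dpt.DPTConfig                            # first access imports configuration_dpt
#   dpt.DPTModel                             # imports modeling_dpt (assuming torch is installed)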
[style_context_codestyle: 305 | label: 1]
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"

_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (list of int): Predicted labels, as returned by a model.\n    references (list of int): Ground truth labels.\n    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n    matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n    Example 1, a basic example with only predictions and references as inputs:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.54\n\n    Example 2, the same example as above, but also including sample weights:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 3, 1, 1, 1, 2])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.1\n\n    Example 3, the same example as above, but with sample weights that cause a negative correlation:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 1, 0, 0, 0, 1])\n        >>> print(round(results['matthews_correlation'], 2))\n        -0.25\n"

_CITATION = "\\n@article{scikit-learn,\n  title={Scikit-learn: Machine Learning in {P}ython},\n  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n  journal={Journal of Machine Learning Research},\n  volume={12},\n  pages={2825--2830},\n  year={2011}\n}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    # Class and method names reconstructed; _DESCRIPTION/_KWARGS_DESCRIPTION/_CITATION were
    # referenced but left undefined in the obfuscated source.

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
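
A hand-checkable case for the metric (editor's addition, not from the source): for predictions [1, 1, 0, 0] against references [1, 0, 0, 1], the confusion matrix gives TP = TN = FP = FN = 1, so MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)) = 0.

# Editor's sketch:
#   metric = datasets.load_metric("matthews_correlation")
#   metric.compute(references=[1, 0, 0, 1], predictions=[1, 1, 0, 0])
#   # -> {'matthews_correlation': 0.0}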
[code_codestyle: 305]
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return a sorted-character signature; anagrams share the same signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the list with the same signature as `my_word`."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
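
An illustrative case (editor's addition, not from the source; assumes the relevant words appear in words.txt):

# signature("listen") == signature("silent") == "eilnst", so if both words are in
# words.txt, anagram("listen") returns a list that includes "silent".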
[style_context_codestyle: 305 | label: 1]
import os
import zipfile

import pytest

from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
        extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
        extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path


@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    input_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(input_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg


def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
[code_codestyle: 305]
import copy
import os
from typing import TYPE_CHECKING, List, Union


if TYPE_CHECKING:
    pass

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}


class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2_560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
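
A composition sketch (editor's addition, not from the source): the top-level config wraps one text and one vision sub-config.

# Editor's sketch:
#   text_config = AlignTextConfig()
#   vision_config = AlignVisionConfig()
#   config = AlignConfig.from_text_vision_configs(text_config, vision_config)
#   config.projection_dim  # -> 640 (default set above)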
[style_context_codestyle: 305 | label: 1]
from collections.abc import Callable

import numpy as np


def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Solve an ODE with the modified Euler (Heun) method: an explicit Euler
    predictor followed by a trapezoidal corrector."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor step (explicit Euler).
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector step (trapezoidal rule on the predicted slope).
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
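
A quick sanity check (editor's addition, not from the source): integrating y' = y from y(0) = 1 over [0, 1] should approach e ≈ 2.71828, and since the method is second-order, halving the step size should roughly quarter the error.

# Editor's sketch:
#   y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#   print(y[-1])  # ~2.71828, close to np.e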
[code_codestyle: 305]
import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate


def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscripting(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
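
The behaviour these tests pin down (editor's note, not from the source): `evaluate` interprets a restricted subset of Python, threads a mutable `state` dict through the run, only calls tools that are passed in explicitly, and returns the value of the last statement.

# Editor's sketch:
#   state = {"x": 3}
#   evaluate("y = add_two(x)\ny * 2", {"add_two": add_two}, state=state)  # -> 10
#   state  # -> {"x": 3, "y": 5}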
[style_context_codestyle: 305 | label: 1]
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
[code_codestyle: 305]
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX


def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """Ordinary least squares on [1, date, match] features via the normal equations."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1  # the garbled source computed `qa - qa`, which is always zero
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data (index the single held-out value;
    # the garbled source compared floats against the whole list)
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")  # f-prefix restored
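
A hand-checkable case for the voting check (editor's addition, not from the source): with votes [1.0, 1.05, 0.98] against an actual value of 1.0, the vote 1.05 counts as not safe (it exceeds the actual), while 1.0 and 0.98 fall within the 0.1 tolerance, so 2 safe votes beat 1 unsafe vote.

# Editor's example:
#   data_safety_checker([1.0, 1.05, 0.98], 1.0)  # -> True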
[style_context_codestyle: 305 | label: 1]
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    """Combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into one processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None  # restored: referenced below even when the kwarg is absent
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
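
A usage sketch (editor's addition, not from the source; the checkpoint name is a placeholder):

# Editor's sketch:
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(images=pil_image, return_tensors="pt")
#   # with apply_ocr=True the image processor supplies words and boxes itself,
#   # and `encoding` holds input_ids, bbox, attention_mask and image.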
[code_codestyle: 305]
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
[style_context_codestyle: 305 | label: 1]
def bfs(graph, s, t, parent):
    """Return True if `t` is reachable from `s` in the residual graph, filling `parent`
    with the path found."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and stores the augmenting path.
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))  # the classic CLRS network; prints 23
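
A second, hand-checkable case (editor's addition, not from the source): a single chain 0 -> 1 -> 2 is bottlenecked by its smallest capacity.

# Editor's example:
#   chain = [
#       [0, 5, 0],
#       [0, 0, 3],
#       [0, 0, 0],
#   ]
#   ford_fulkerson(chain, 0, 2)  # -> 3, the capacity of the bottleneck edge 1 -> 2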
[code_codestyle: 305]
import tempfile
import unittest

import numpy as np

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax


if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8


@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")


def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)

    for key in flat_params_1.keys():
        # The garbled source compared a tensor with itself, which is always zero.
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal


@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = ['''image_processor''', '''tokenizer''']
lowerCamelCase__ = '''ViltImageProcessor'''
lowerCamelCase__ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : Optional[int] , __magic_name__ : str=None , __magic_name__ : List[str]=None , **__magic_name__ : Any ) -> str:
SCREAMING_SNAKE_CASE_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __magic_name__ , )
SCREAMING_SNAKE_CASE_ = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = self.image_processor
def __call__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __magic_name__ : bool = True , __magic_name__ : Union[bool, str, PaddingStrategy] = False , __magic_name__ : Union[bool, str, TruncationStrategy] = None , __magic_name__ : Optional[int] = None , __magic_name__ : int = 0 , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = True , __magic_name__ : Optional[Union[str, TensorType]] = None , **__magic_name__ : str , ) -> BatchEncoding:
SCREAMING_SNAKE_CASE_ = self.tokenizer(
text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_token_type_ids=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
# add pixel_values + pixel_mask
SCREAMING_SNAKE_CASE_ = self.image_processor(__magic_name__ , return_tensors=__magic_name__ )
encoding.update(__magic_name__ )
return encoding
def __A ( self : Optional[int] , *__magic_name__ : List[Any] , **__magic_name__ : Optional[Any] ) -> Any:
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def __A ( self : Dict , *__magic_name__ : List[Any] , **__magic_name__ : Union[str, Any] ) -> str:
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def __A ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __A ( self : Dict ) -> List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __magic_name__ , )
return self.image_processor_class
@property
def __A ( self : int ) -> List[Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __magic_name__ , )
return self.image_processor
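# A minimal, hypothetical usage sketch for the processor above (the checkpoint
# id and image file are assumptions, not taken from this module):
#
#     from transformers import ViltProcessor
#     from PIL import Image
#     processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-mlm")
#     encoding = processor(Image.open("photo.jpg"), "a photo of a cat", return_tensors="pt")
#     # encoding holds input_ids/attention_mask plus pixel_values/pixel_mask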
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
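# Hypothetical invocation sketch (the path is a placeholder):
#
#     accelerate config --config_file /path/to/default_config.yaml
#
# Omitting --config_file writes to the default cache location described in the
# argument help above.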
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowerCamelCase (TensorFormatter[Mapping, '''torch.Tensor''', Mapping] ):
"""simple docstring"""
def __init__( self : Optional[int] , __magic_name__ : Optional[int]=None , **__magic_name__ : Tuple ) -> str:
super().__init__(features=__magic_name__ )
SCREAMING_SNAKE_CASE_ = torch_tensor_kwargs
import torch # noqa import torch at initialization
def __A ( self : Any , __magic_name__ : str ) -> List[Any]:
import torch
        if isinstance(__magic_name__ , list ) and column:
if all(
isinstance(__magic_name__ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(__magic_name__ )
return column
def __A ( self : int , __magic_name__ : str ) -> Tuple:
import torch
        if isinstance(__magic_name__ , (str, bytes, type(None )) ):
return value
elif isinstance(__magic_name__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
SCREAMING_SNAKE_CASE_ = {}
if isinstance(__magic_name__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
SCREAMING_SNAKE_CASE_ = {"dtype": torch.intaa}
elif isinstance(__magic_name__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
SCREAMING_SNAKE_CASE_ = {"dtype": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__magic_name__ , PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = np.asarray(__magic_name__ )
return torch.tensor(__magic_name__ , **{**default_dtype, **self.torch_tensor_kwargs} )
def __A ( self : List[str] , __magic_name__ : List[Any] ) -> Any:
import torch
# support for torch, tf, jax etc.
if hasattr(__magic_name__ , "__array__" ) and not isinstance(__magic_name__ , torch.Tensor ):
SCREAMING_SNAKE_CASE_ = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__magic_name__ , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
elif isinstance(__magic_name__ , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(__magic_name__ )
def __A ( self : Any , __magic_name__ : dict ) -> Tuple:
return map_nested(self._recursive_tensorize , __magic_name__ , map_list=__magic_name__ )
def __A ( self : Tuple , __magic_name__ : pa.Table ) -> Mapping:
SCREAMING_SNAKE_CASE_ = self.numpy_arrow_extractor().extract_row(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.python_features_decoder.decode_row(__magic_name__ )
return self.recursive_tensorize(__magic_name__ )
def __A ( self : Optional[Any] , __magic_name__ : pa.Table ) -> "torch.Tensor":
SCREAMING_SNAKE_CASE_ = self.numpy_arrow_extractor().extract_column(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.python_features_decoder.decode_column(__magic_name__ , pa_table.column_names[0] )
SCREAMING_SNAKE_CASE_ = self.recursive_tensorize(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self._consolidate(__magic_name__ )
return column
def __A ( self : int , __magic_name__ : pa.Table ) -> Mapping:
SCREAMING_SNAKE_CASE_ = self.numpy_arrow_extractor().extract_batch(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.python_features_decoder.decode_batch(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.recursive_tensorize(__magic_name__ )
for column_name in batch:
SCREAMING_SNAKE_CASE_ = self._consolidate(batch[column_name] )
return batch
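# A minimal usage sketch (dataset contents invented for illustration): this
# formatter is what backs `Dataset.with_format("torch")` in `datasets`.
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("torch")
#     ds[0]["x"]  # tensor([1, 2])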
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
lowerCamelCase__ = Features({'''text''': Value('''string''' )} )
lowerCamelCase__ = Features({'''summary''': Value('''string''' )} )
lowerCamelCase__ = "text"
lowerCamelCase__ = "summary"
@property
def __A ( self : Dict ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = "x" , __UpperCamelCase = 1_0**-1_0 , __UpperCamelCase = 1 , ):
SCREAMING_SNAKE_CASE_ = symbols(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = lambdify(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = lambdify(__UpperCamelCase , diff(__UpperCamelCase , __UpperCamelCase ) )
SCREAMING_SNAKE_CASE_ = starting_point
while True:
if diff_function(__UpperCamelCase ) != 0:
SCREAMING_SNAKE_CASE_ = prev_guess - multiplicity * func(__UpperCamelCase ) / diff_function(
__UpperCamelCase )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
SCREAMING_SNAKE_CASE_ = next_guess
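# Worked single step (illustrative): for f(x) = x**2 - 4 starting at x0 = 3
# with multiplicity 1, one update gives
#     x1 = x0 - f(x0) / f'(x0) = 3 - 5 / 6 = 2.1666...,
# already close to the root x = 2; subsequent steps converge quadratically.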
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
# Find fourth Root of 5
print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"{newton_raphson('log(y) - 1', 2, variable='y')}",
)
# Exponential Roots
print(
    "The root of exp(x) - 1 = 0 is",
    f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
)
# Find root of cos(x)
print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
from ....utils import logging
A : List[str] = logging.get_logger(__name__)
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Any=None , __magic_name__ : List[str]=2_048 ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = config.__dict__
SCREAMING_SNAKE_CASE_ = modal_hidden_size
if num_labels:
SCREAMING_SNAKE_CASE_ = num_labels
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = TaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
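# Hypothetical invocation sketch (script name and paths are placeholders):
#
#     python convert_t5_tf_checkpoint.py \
#         --tf_checkpoint_path /path/to/tf_ckpt \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/pytorch_model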
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
A : Any = logging.get_logger(__name__)
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self : Optional[int] , *__magic_name__ : Tuple , **__magic_name__ : Optional[int] ) -> None:
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A : str = logging.get_logger(__name__)
A : Optional[int] = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''table-transformer'''
lowerCamelCase__ = ['''past_key_values''']
lowerCamelCase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : List[Any] , __magic_name__ : Optional[Any]=True , __magic_name__ : Dict=None , __magic_name__ : Any=3 , __magic_name__ : List[str]=100 , __magic_name__ : Union[str, Any]=6 , __magic_name__ : Dict=2_048 , __magic_name__ : str=8 , __magic_name__ : int=6 , __magic_name__ : List[Any]=2_048 , __magic_name__ : Optional[int]=8 , __magic_name__ : Optional[int]=0.0 , __magic_name__ : List[Any]=0.0 , __magic_name__ : Optional[Any]=True , __magic_name__ : List[Any]="relu" , __magic_name__ : List[str]=256 , __magic_name__ : List[str]=0.1 , __magic_name__ : int=0.0 , __magic_name__ : Optional[Any]=0.0 , __magic_name__ : Tuple=0.02 , __magic_name__ : str=1.0 , __magic_name__ : int=False , __magic_name__ : Dict="sine" , __magic_name__ : Union[str, Any]="resnet50" , __magic_name__ : Optional[Any]=True , __magic_name__ : str=False , __magic_name__ : List[str]=1 , __magic_name__ : int=5 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Tuple=1 , __magic_name__ : Optional[int]=1 , __magic_name__ : Optional[Any]=5 , __magic_name__ : Optional[int]=2 , __magic_name__ : Union[str, Any]=0.1 , **__magic_name__ : Tuple , ) -> str:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = backbone_config.get("model_type" )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(__magic_name__ )
# set timm attributes to None
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None, None, None
SCREAMING_SNAKE_CASE_ = use_timm_backbone
SCREAMING_SNAKE_CASE_ = backbone_config
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = num_queries
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = encoder_attention_heads
SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = decoder_attention_heads
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = activation_function
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = init_xavier_std
SCREAMING_SNAKE_CASE_ = encoder_layerdrop
SCREAMING_SNAKE_CASE_ = decoder_layerdrop
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = auxiliary_loss
SCREAMING_SNAKE_CASE_ = position_embedding_type
SCREAMING_SNAKE_CASE_ = backbone
SCREAMING_SNAKE_CASE_ = use_pretrained_backbone
SCREAMING_SNAKE_CASE_ = dilation
# Hungarian matcher
SCREAMING_SNAKE_CASE_ = class_cost
SCREAMING_SNAKE_CASE_ = bbox_cost
SCREAMING_SNAKE_CASE_ = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE_ = mask_loss_coefficient
SCREAMING_SNAKE_CASE_ = dice_loss_coefficient
SCREAMING_SNAKE_CASE_ = bbox_loss_coefficient
SCREAMING_SNAKE_CASE_ = giou_loss_coefficient
SCREAMING_SNAKE_CASE_ = eos_coefficient
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def __A ( self : Union[str, Any] ) -> int:
return self.encoder_attention_heads
@property
def __A ( self : Any ) -> int:
return self.d_model
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __A ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def __A ( self : Any ) -> float:
return 1e-5
@property
def __A ( self : int ) -> int:
return 12
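# A minimal usage sketch (assuming the classes above keep their upstream names
# in transformers):
#
#     from transformers import TableTransformerConfig
#     config = TableTransformerConfig()
#     config.hidden_size == config.d_model  # True, resolved via attribute_map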
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase :
"""simple docstring"""
def __init__( self : str , __magic_name__ : Dict , __magic_name__ : str=13 , __magic_name__ : Dict=[30, 30] , __magic_name__ : Any=2 , __magic_name__ : Optional[int]=3 , __magic_name__ : Dict=True , __magic_name__ : Tuple=True , __magic_name__ : Union[str, Any]=32 , __magic_name__ : Optional[Any]=5 , __magic_name__ : List[str]=4 , __magic_name__ : Any=37 , __magic_name__ : Union[str, Any]="gelu" , __magic_name__ : Tuple=0.1 , __magic_name__ : Tuple=0.1 , __magic_name__ : Optional[Any]=10 , __magic_name__ : str=0.02 , __magic_name__ : int=3 , __magic_name__ : Union[str, Any]=None , __magic_name__ : Optional[int]=8 , __magic_name__ : int=10 , ) -> Any:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = n_targets
SCREAMING_SNAKE_CASE_ = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
SCREAMING_SNAKE_CASE_ = (image_size[1] // patch_size) * (image_size[0] // patch_size)
SCREAMING_SNAKE_CASE_ = num_patches + 1 + self.num_detection_tokens
def __A ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
SCREAMING_SNAKE_CASE_ = []
for i in range(self.batch_size ):
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.rand(self.n_targets , 4 , device=__magic_name__ )
labels.append(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, labels
def __A ( self : List[Any] ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def __A ( self : Dict , __magic_name__ : Any , __magic_name__ : Dict , __magic_name__ : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = YolosModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __A ( self : Dict , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Any ) -> Dict:
SCREAMING_SNAKE_CASE_ = YolosForObjectDetection(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(pixel_values=__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
SCREAMING_SNAKE_CASE_ = model(pixel_values=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def __A ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCamelCase__ = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __A ( self : int , __magic_name__ : List[str] , __magic_name__ : List[Any] , __magic_name__ : str=False ) -> int:
SCREAMING_SNAKE_CASE_ = super()._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
SCREAMING_SNAKE_CASE_ = []
for i in range(self.model_tester.batch_size ):
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = torch.ones(
size=(self.model_tester.n_targets,) , device=__magic_name__ , dtype=torch.long )
SCREAMING_SNAKE_CASE_ = torch.ones(
self.model_tester.n_targets , 4 , device=__magic_name__ , dtype=torch.float )
labels.append(__magic_name__ )
SCREAMING_SNAKE_CASE_ = labels
return inputs_dict
def __A ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = YolosModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def __A ( self : Dict ) -> str:
self.config_tester.run_common_tests()
def __A ( self : str ) -> Tuple:
# YOLOS does not use inputs_embeds
pass
def __A ( self : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def __A ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def __A ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __A ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = True
# in YOLOS, the seq_len is different
SCREAMING_SNAKE_CASE_ = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
SCREAMING_SNAKE_CASE_ = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
SCREAMING_SNAKE_CASE_ = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
SCREAMING_SNAKE_CASE_ = len(__magic_name__ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
SCREAMING_SNAKE_CASE_ = 1
self.assertEqual(out_len + added_hidden_states , len(__magic_name__ ) )
SCREAMING_SNAKE_CASE_ = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __A ( self : Dict ) -> str:
def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : Optional[int] ):
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
SCREAMING_SNAKE_CASE_ = outputs.hidden_states
SCREAMING_SNAKE_CASE_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
# YOLOS has a different seq_length
SCREAMING_SNAKE_CASE_ = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def __A ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__magic_name__ )
@slow
def __A ( self : Any ) -> Optional[Any]:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = YolosModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self : Dict ) -> Optional[Any]:
return AutoImageProcessor.from_pretrained("hustvl/yolos-small" ) if is_vision_available() else None
@slow
def __A ( self : Any ) -> List[str]:
SCREAMING_SNAKE_CASE_ = YolosForObjectDetection.from_pretrained("hustvl/yolos-small" ).to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(inputs.pixel_values )
# verify outputs
SCREAMING_SNAKE_CASE_ = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=__magic_name__ , )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __magic_name__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 ) )
# verify postprocessing
SCREAMING_SNAKE_CASE_ = image_processor.post_process_object_detection(
__magic_name__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
SCREAMING_SNAKE_CASE_ = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = [75, 75, 17, 63, 17]
SCREAMING_SNAKE_CASE_ = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(__magic_name__ )
self.assertEqual(len(results["scores"] ) , 5 )
self.assertTrue(torch.allclose(results["scores"] , __magic_name__ , atol=1e-4 ) )
self.assertSequenceEqual(results["labels"].tolist() , __magic_name__ )
self.assertTrue(torch.allclose(results["boxes"][0, :] , __magic_name__ ) )
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }
    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict
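# Worked example of the layer renaming applied above (input key invented):
#
#     re.sub(r"layers_(\d+)", r"layer.\1", "encoder.layers_3.mlp.wi")
#     # -> "encoder.layer.3.mlp.wi", after which "encoder" becomes "encoder.encoder"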
def convert_pixastruct_original_pytorch_checkpoint_to_hf(t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    flax_params = get_flax_param(t5x_checkpoint_path)
    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18)
        decoder_config = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa)
    model = PixaStructForConditionalGeneration(config)
    converted_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(converted_params)
    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)
    if use_large:
        processor.image_processor.max_patches = 4096
    processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
A : Any = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
A : Dict = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
from __future__ import annotations
import numpy as np
def lu_decomposition(table):
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            F'''{rows}x{columns} array:\n{table}'''
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
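if __name__ == "__main__":
    # A minimal usage sketch (matrix values invented for illustration): verify
    # that the computed factors multiply back to the input matrix.
    example_matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    example_lower, example_upper = lu_decomposition(example_matrix)
    assert np.allclose(example_lower @ example_upper, example_matrix)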
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = "x = 3"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3} )
SCREAMING_SNAKE_CASE_ = "x = y"
SCREAMING_SNAKE_CASE_ = {"y": 5}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 5, "y": 5} )
def __A ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE_ = "y = add_two(x)"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result is None
assert "tried to execute add_two" in out.out
def __A ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE_ = "x = 3"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3} )
def __A ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def __A ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = "x = 3\ny = 5"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
def __A ( self : Any ) -> List[str]:
SCREAMING_SNAKE_CASE_ = "text = f'This is x: {x}.'"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__magic_name__ , {"x": 3, "text": "This is x: 3."} )
def __A ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE_ = "if x <= 3:\n y = 2\nelse:\n y = 5"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 2} )
SCREAMING_SNAKE_CASE_ = {"x": 8}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 8, "y": 5} )
def __A ( self : str ) -> str:
SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
self.assertListEqual(__magic_name__ , [3, 5] )
self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} )
def __A ( self : Union[str, Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = "y = x"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 3} )
def __A ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]\ntest_list[1]"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} )
SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def __A ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = "x = 0\nfor i in range(3):\n x = i"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"range": range} , state=__magic_name__ )
assert result == 2
self.assertDictEqual(__magic_name__ , {"x": 2, "i": 2} )
from math import pi, sqrt, tan
def surface_area_cube(side_length):
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values" )
return 6 * side_length**2
def surface_area_cuboid(length, breadth, height):
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius):
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values" )
return 4 * pi * radius**2
def surface_area_hemisphere(radius):
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values" )
return 3 * pi * radius**2
def surface_area_cone(radius, height):
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1, radius_2, height):
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius, height):
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values" )
return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius, tube_radius):
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length, width):
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values" )
return length * width
def area_square(side_length):
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values" )
return side_length**2
def area_triangle(base, height):
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values" )
return (base * height) / 2
def area_triangle_three_sides(side_1, side_2, side_3):
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3))
    return area
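# Worked check of Heron's formula above: for sides 5, 12, 13 the semi-perimeter
# is s = 15 and the area is sqrt(15 * 10 * 3 * 2) = sqrt(900) = 30.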
def area_parallelogram(base, height):
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values" )
return base * height
def area_trapezium(base_1, base_2, height):
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height
def area_circle(radius):
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values" )
return pi * radius**2
def area_ellipse(radius_x, radius_y):
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values" )
return pi * radius_x * radius_y
def area_rhombus(diagonal_1, diagonal_2):
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides, length):
    if not isinstance(sides, int) or sides < 3:
raise ValueError(
"area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides" )
elif length < 0:
raise ValueError(
"area_reg_polygon() only accepts non-negative values as \
length of a side" )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree):
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx, dims):
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
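# Worked example of the conversion above: with dims = (2, 3), flat index 4
# unpacks to (1, 1), since 4 = 1 * 3 + 1.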
@torch.jit.ignore
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , ):
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(__UpperCamelCase ) -> None:
SCREAMING_SNAKE_CASE_ = True
for i in range(len(__UpperCamelCase ) ):
SCREAMING_SNAKE_CASE_ = -1 * (i + 1)
l[reversed_idx] &= tally
SCREAMING_SNAKE_CASE_ = l[reversed_idx]
if start_edges is None:
SCREAMING_SNAKE_CASE_ = [s == 0 for s in start]
reduce_edge_list(__UpperCamelCase )
if end_edges is None:
SCREAMING_SNAKE_CASE_ = [e == (d - 1) for e, d in zip(__UpperCamelCase , __UpperCamelCase )]
reduce_edge_list(__UpperCamelCase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(__UpperCamelCase ) == 0:
return [()]
elif len(__UpperCamelCase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
# Dimensions common to start and end can be selected directly
for s, e in zip(__UpperCamelCase , __UpperCamelCase ):
if s == e:
path_list.append(slice(__UpperCamelCase , s + 1 ) )
else:
break
SCREAMING_SNAKE_CASE_ = tuple(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = len(__UpperCamelCase )
# start == end, and we're done
if divergence_idx == len(__UpperCamelCase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
SCREAMING_SNAKE_CASE_ = start[divergence_idx]
return tuple(
path + (slice(__UpperCamelCase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
SCREAMING_SNAKE_CASE_ = end[divergence_idx]
return tuple(
path + (slice(__UpperCamelCase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
SCREAMING_SNAKE_CASE_ = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
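# Worked example (using the name _get_minimal_slice_set, as at the call sites
# below): covering flat positions 1..2 of a 2x2 grid, i.e. elements (0, 1) and
# (1, 0), requires two slice tuples:
#   _get_minimal_slice_set((0, 1), (1, 0), (2, 2))
#   -> [(slice(0, 1), slice(1, 2)), (slice(1, 2), slice(0, 1))]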
@torch.jit.ignore
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = t.shape[:no_batch_dims]
SCREAMING_SNAKE_CASE_ = list(_flat_idx_to_idx(__UpperCamelCase , __UpperCamelCase ) )
# _get_minimal_slice_set is inclusive
SCREAMING_SNAKE_CASE_ = list(_flat_idx_to_idx(flat_end - 1 , __UpperCamelCase ) )
# Get an ordered list of slices to perform
SCREAMING_SNAKE_CASE_ = _get_minimal_slice_set(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )
SCREAMING_SNAKE_CASE_ = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
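# Illustrative example (using the name _chunk_slice, as at the call site
# below): for a tensor t of shape (2, 3, 4) with no_batch_dims=2, the call
# _chunk_slice(t, 1, 5, 2) gathers flat batch positions 1..4 and returns a
# tensor of shape (4, 4).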
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = False , ):
if not (len(__UpperCamelCase ) > 0):
raise ValueError("Must provide at least one input" )
SCREAMING_SNAKE_CASE_ = [shape[:no_batch_dims] for shape in _fetch_dims(__UpperCamelCase )]
SCREAMING_SNAKE_CASE_ = tuple([max(s ) for s in zip(*__UpperCamelCase )] )
def _prep_inputs(__UpperCamelCase ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
SCREAMING_SNAKE_CASE_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
SCREAMING_SNAKE_CASE_ = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
SCREAMING_SNAKE_CASE_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
SCREAMING_SNAKE_CASE_ = tensor_tree_map(_prep_inputs , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = None
if _out is not None:
SCREAMING_SNAKE_CASE_ = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
SCREAMING_SNAKE_CASE_ = 1
for d in orig_batch_dims:
flat_batch_dim *= d
SCREAMING_SNAKE_CASE_ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(__UpperCamelCase ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = prepped_outputs
for _ in range(__UpperCamelCase ):
# Chunk the input
if not low_mem:
SCREAMING_SNAKE_CASE_ = _select_chunk
else:
SCREAMING_SNAKE_CASE_ = partial(
_chunk_slice , flat_start=__UpperCamelCase , flat_end=min(__UpperCamelCase , i + chunk_size ) , no_batch_dims=len(__UpperCamelCase ) , )
SCREAMING_SNAKE_CASE_ = tensor_tree_map(__UpperCamelCase , __UpperCamelCase )
# Run the layer on the chunk
SCREAMING_SNAKE_CASE_ = layer(**__UpperCamelCase )
# Allocate space for the output
if out is None:
SCREAMING_SNAKE_CASE_ = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , __UpperCamelCase )
# Put the chunk in its pre-allocated space
if isinstance(__UpperCamelCase , __UpperCamelCase ):
def assign(__UpperCamelCase , __UpperCamelCase ) -> None:
for k, v in da.items():
if isinstance(__UpperCamelCase , __UpperCamelCase ):
assign(__UpperCamelCase , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
SCREAMING_SNAKE_CASE_ = da[k]
assign(__UpperCamelCase , __UpperCamelCase )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
for xa, xa in zip(__UpperCamelCase , __UpperCamelCase ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
SCREAMING_SNAKE_CASE_ = xa
elif isinstance(__UpperCamelCase , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
SCREAMING_SNAKE_CASE_ = output_chunk
else:
raise ValueError("Not supported" )
i += chunk_size
SCREAMING_SNAKE_CASE_ = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) , __UpperCamelCase )
return out
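# Minimal usage sketch (hypothetical names; the wrapper above is conventionally
# bound to `chunk_layer`):
#   out = chunk_layer(layer_fn, {"x": x}, chunk_size=4, no_batch_dims=1)
# Every input tensor is flattened over its leading batch dimensions, layer_fn
# is run on chunks of 4 rows at a time, and the per-chunk outputs are written
# back into a single pre-allocated output tree of the original batch shape.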
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Dict , __magic_name__ : int = 512 , ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = max_chunk_size
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
def __A ( self : List[str] , __magic_name__ : Callable , __magic_name__ : tuple , __magic_name__ : int ) -> int:
logging.info("Tuning chunk size..." )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
SCREAMING_SNAKE_CASE_ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
SCREAMING_SNAKE_CASE_ = [c for c in candidates if c > min_chunk_size]
SCREAMING_SNAKE_CASE_ = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(__magic_name__ : int ) -> bool:
try:
with torch.no_grad():
fn(*__magic_name__ , chunk_size=__magic_name__ )
return True
except RuntimeError:
return False
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = len(__magic_name__ ) - 1
while i > min_viable_chunk_size_index:
SCREAMING_SNAKE_CASE_ = test_chunk_size(candidates[i] )
if not viable:
SCREAMING_SNAKE_CASE_ = (min_viable_chunk_size_index + i) // 2
else:
SCREAMING_SNAKE_CASE_ = i
SCREAMING_SNAKE_CASE_ = (i + len(__magic_name__ ) - 1) // 2
return candidates[min_viable_chunk_size_index]
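# The loop above bisects the candidate list: a failed trial (RuntimeError,
# typically out-of-memory) halves the search range downwards, a successful one
# raises the lower bound, converging on the largest chunk size that runs.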
def __A ( self : int , __magic_name__ : Iterable , __magic_name__ : Iterable ) -> bool:
SCREAMING_SNAKE_CASE_ = True
for aa, aa in zip(__magic_name__ , __magic_name__ ):
assert type(__magic_name__ ) == type(__magic_name__ )
if isinstance(__magic_name__ , (list, tuple) ):
consistent &= self._compare_arg_caches(__magic_name__ , __magic_name__ )
elif isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
SCREAMING_SNAKE_CASE_ = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
consistent &= self._compare_arg_caches(__magic_name__ , __magic_name__ )
else:
consistent &= aa == aa
return consistent
def __A ( self : Any , __magic_name__ : Callable , __magic_name__ : tuple , __magic_name__ : int , ) -> int:
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = tree_map(lambda a : a.shape if isinstance(a , torch.Tensor ) else a , __magic_name__ , __magic_name__ )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self._compare_arg_caches(self.cached_arg_data , __magic_name__ )
else:
# Nothing has been cached yet, so we must tune from scratch
SCREAMING_SNAKE_CASE_ = False
if not consistent:
SCREAMING_SNAKE_CASE_ = self._determine_favorable_chunk_size(
__magic_name__ , __magic_name__ , __magic_name__ , )
SCREAMING_SNAKE_CASE_ = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
A : List[str] = logging.get_logger(__name__)
A : int = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''blenderbot-small'''
lowerCamelCase__ = ['''past_key_values''']
lowerCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Dict , __magic_name__ : Dict=50_265 , __magic_name__ : str=512 , __magic_name__ : List[Any]=8 , __magic_name__ : Any=2_048 , __magic_name__ : Dict=16 , __magic_name__ : Any=8 , __magic_name__ : Optional[int]=2_048 , __magic_name__ : Dict=16 , __magic_name__ : Tuple=0.0 , __magic_name__ : Dict=0.0 , __magic_name__ : Optional[int]=True , __magic_name__ : Any=True , __magic_name__ : Dict="gelu" , __magic_name__ : Tuple=512 , __magic_name__ : List[str]=0.1 , __magic_name__ : List[Any]=0.0 , __magic_name__ : List[Any]=0.0 , __magic_name__ : Tuple=0.02 , __magic_name__ : Union[str, Any]=1 , __magic_name__ : List[Any]=False , __magic_name__ : str=0 , __magic_name__ : Dict=1 , __magic_name__ : str=2 , __magic_name__ : Union[str, Any]=2 , **__magic_name__ : Optional[Any] , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = encoder_attention_heads
SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = decoder_attention_heads
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = activation_function
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = encoder_layerdrop
SCREAMING_SNAKE_CASE_ = decoder_layerdrop
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , is_encoder_decoder=__magic_name__ , decoder_start_token_id=__magic_name__ , forced_eos_token_id=__magic_name__ , **__magic_name__ , )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def __A ( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE_ = {0: "batch"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "decoder_sequence"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
for i in range(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def __A ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = super().outputs
else:
SCREAMING_SNAKE_CASE_ = super(__magic_name__ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
for i in range(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def __A ( self : int , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE_ = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE_ = dict(**__magic_name__ , **__magic_name__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = common_inputs["input_ids"].shape
SCREAMING_SNAKE_CASE_ = common_inputs["decoder_input_ids"].shape[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_attention_heads
SCREAMING_SNAKE_CASE_ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE_ = decoder_seq_length + 3
SCREAMING_SNAKE_CASE_ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
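# Each cached key/value tensor above has shape
# (batch, num_heads, past_length, hidden_size // num_heads).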
SCREAMING_SNAKE_CASE_ = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__magic_name__ , __magic_name__ )] , dim=1 )
SCREAMING_SNAKE_CASE_ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
SCREAMING_SNAKE_CASE_ = min(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = max(__magic_name__ , __magic_name__ ) - min_num_layers
SCREAMING_SNAKE_CASE_ = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__magic_name__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE_ = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__magic_name__ , __magic_name__ ):
common_inputs["past_key_values"].append((torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) )
return common_inputs
def __A ( self : Union[str, Any] , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE_ = seqlen + 2
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_attention_heads
SCREAMING_SNAKE_CASE_ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE_ = common_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE_ = torch.cat(
[common_inputs["attention_mask"], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
SCREAMING_SNAKE_CASE_ = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(__magic_name__ )
]
return common_inputs
def __A ( self : Dict , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ = tokenizer.num_special_tokens_to_add(__magic_name__ )
SCREAMING_SNAKE_CASE_ = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__magic_name__ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE_ = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE_ = dict(tokenizer(__magic_name__ , return_tensors=__magic_name__ ) )
return common_inputs
def __A ( self : Optional[Any] , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_causal_lm(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
else:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
return common_inputs
def __A ( self : Optional[Any] , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : List[str] ) -> List[str]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = super()._flatten_past_key_values_(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
else:
SCREAMING_SNAKE_CASE_ = super(__magic_name__ , self )._flatten_past_key_values_(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def a__ ( __UpperCamelCase ):
assert column_title.isupper()
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = len(__UpperCamelCase ) - 1
SCREAMING_SNAKE_CASE_ = 0
while index >= 0:
SCREAMING_SNAKE_CASE_ = (ord(column_title[index] ) - 64) * pow(26 , power )
answer += value
power += 1
index -= 1
return answer
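# Illustrative example: "AB" -> (ord("B") - 64) * 26**0 + (ord("A") - 64) * 26**1
# = 2 + 26 = 28, matching spreadsheet column numbering.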
if __name__ == "__main__":
from doctest import testmod
testmod()
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowerCamelCase :
"""simple docstring"""
def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : int=100 , __magic_name__ : Optional[Any]=13 , __magic_name__ : Dict=30 , __magic_name__ : Tuple=2 , __magic_name__ : str=3 , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : Union[str, Any]=32 , __magic_name__ : Optional[int]=4 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Any="gelu" , __magic_name__ : int=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Optional[int]=10 , __magic_name__ : Tuple=0.02 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=None , __magic_name__ : Tuple=[0, 1, 2, 3] , ) -> List[str]:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = 100
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = out_indices
SCREAMING_SNAKE_CASE_ = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_ = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_ = num_patches + 1
def __A ( self : Any ) -> int:
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def __A ( self : Dict ) -> Optional[int]:
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __A ( self : Optional[int] , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = BeitModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : str ) -> int:
SCREAMING_SNAKE_CASE_ = BeitForMaskedImageModeling(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __A ( self : Dict , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_ = BeitForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = BeitForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self : Tuple , __magic_name__ : Any , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : int ) -> int:
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __A ( self : str ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __A ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = BeitModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def __A ( self : Dict ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def __A ( self : List[str] ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __A ( self : str ) -> List[str]:
pass
def __A ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def __A ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def __A ( self : Tuple ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __A ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def __A ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
def __A ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__magic_name__ )
def __A ( self : int ) -> Optional[int]:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__magic_name__ ), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ ).loss
loss.backward()
def __A ( self : Any ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__magic_name__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.gradient_checkpointing_enable()
model.to(__magic_name__ )
model.train()
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ ).loss
loss.backward()
def __A ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = _config_zero_init(__magic_name__ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(config=__magic_name__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def __A ( self : int ) -> Optional[int]:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = BeitModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def a__ ( ):
SCREAMING_SNAKE_CASE_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self : List[Any] ) -> str:
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def __A ( self : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).pixel_values.to(__magic_name__ )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE_ = torch.ones((1, 196) , dtype=torch.bool ).to(__magic_name__ )
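# 196 = (224 // 16) ** 2, i.e. every patch of the 224px image is masked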
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(pixel_values=__magic_name__ , bool_masked_pos=__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __magic_name__ , atol=1e-2 ) )
@slow
def __A ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE_ = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[0, :3] , __magic_name__ , atol=1e-4 ) )
SCREAMING_SNAKE_CASE_ = 281
self.assertEqual(logits.argmax(-1 ).item() , __magic_name__ )
@slow
def __A ( self : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor([1.6881, -0.2787, 0.5901] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[0, :3] , __magic_name__ , atol=1e-4 ) )
SCREAMING_SNAKE_CASE_ = 2_396
self.assertEqual(logits.argmax(-1 ).item() , __magic_name__ )
@slow
def __A ( self : Tuple ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
SCREAMING_SNAKE_CASE_ = model.to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = BeitImageProcessor(do_resize=__magic_name__ , size=640 , do_center_crop=__magic_name__ )
SCREAMING_SNAKE_CASE_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
SCREAMING_SNAKE_CASE_ = Image.open(ds[0]["file"] )
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=__magic_name__ , )
else:
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=__magic_name__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __magic_name__ , atol=1e-4 ) )
@slow
def __A ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
SCREAMING_SNAKE_CASE_ = model.to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = BeitImageProcessor(do_resize=__magic_name__ , size=640 , do_center_crop=__magic_name__ )
SCREAMING_SNAKE_CASE_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
SCREAMING_SNAKE_CASE_ = Image.open(ds[0]["file"] )
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE_ = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ , target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE_ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __magic_name__ )
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
A : Union[str, Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCamelCase (datasets.BuilderConfig ):
"""simple docstring"""
lowerCamelCase__ = None
lowerCamelCase__ = "utf-8"
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = True # deprecated
lowerCamelCase__ = None # deprecated
lowerCamelCase__ = 10 << 20 # 10MB
lowerCamelCase__ = None
class lowerCamelCase (datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCamelCase__ = JsonConfig
def __A ( self : Optional[Any] ) -> List[Any]:
if self.config.block_size is not None:
logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" )
SCREAMING_SNAKE_CASE_ = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." )
if self.config.newlines_in_values is not None:
raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" )
return datasets.DatasetInfo(features=self.config.features )
def __A ( self : Dict , __magic_name__ : List[str] ) -> List[Any]:
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
SCREAMING_SNAKE_CASE_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__magic_name__ , (str, list, tuple) ):
SCREAMING_SNAKE_CASE_ = data_files
if isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = [files]
SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(__magic_name__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
SCREAMING_SNAKE_CASE_ = []
for split_name, files in data_files.items():
if isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = [files]
SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(__magic_name__ ) for file in files]
splits.append(datasets.SplitGenerator(name=__magic_name__ , gen_kwargs={"files": files} ) )
return splits
def __A ( self : int , __magic_name__ : pa.Table ) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
SCREAMING_SNAKE_CASE_ = self.config.features.arrow_schema.field(__magic_name__ ).type
SCREAMING_SNAKE_CASE_ = pa_table.append_column(__magic_name__ , pa.array([None] * len(__magic_name__ ) , type=__magic_name__ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
SCREAMING_SNAKE_CASE_ = table_cast(__magic_name__ , self.config.features.arrow_schema )
return pa_table
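# Note: any column declared in `features` but absent from the JSON is appended
# above as an all-null column before the (more expensive) schema cast.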
def __A ( self : Tuple , __magic_name__ : Any ) -> int:
for file_idx, file in enumerate(itertools.chain.from_iterable(__magic_name__ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
SCREAMING_SNAKE_CASE_ = json.load(__magic_name__ )
# We keep only the field we are interested in
SCREAMING_SNAKE_CASE_ = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(__magic_name__ , (list, tuple) ):
SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset] )
SCREAMING_SNAKE_CASE_ = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys}
else:
SCREAMING_SNAKE_CASE_ = dataset
SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(__magic_name__ )
yield file_idx, self._cast_table(__magic_name__ )
# If the file has one json object per line
else:
with open(__magic_name__ , "rb" ) as f:
SCREAMING_SNAKE_CASE_ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
SCREAMING_SNAKE_CASE_ = max(self.config.chunksize // 32 , 16 << 10 )
SCREAMING_SNAKE_CASE_ = (
self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
)
while True:
SCREAMING_SNAKE_CASE_ = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__magic_name__ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
SCREAMING_SNAKE_CASE_ = batch.decode(self.config.encoding , errors=__magic_name__ ).encode("utf-8" )
try:
while True:
try:
SCREAMING_SNAKE_CASE_ = paj.read_json(
io.BytesIO(__magic_name__ ) , read_options=paj.ReadOptions(block_size=__magic_name__ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(__magic_name__ , pa.ArrowInvalid )
and "straddling" not in str(__magic_name__ )
or block_size > len(__magic_name__ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F'''Batch of {len(__magic_name__ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
SCREAMING_SNAKE_CASE_ = json.load(__magic_name__ )
except json.JSONDecodeError:
logger.error(F'''Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}''' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__magic_name__ , __magic_name__ ): # list is the only sequence type supported in JSON
try:
SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset] )
SCREAMING_SNAKE_CASE_ = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys}
SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(__magic_name__ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}''' )
raise ValueError(F'''Not able to read records in the JSON file at {file}.''' ) from None
yield file_idx, self._cast_table(__magic_name__ )
break
else:
logger.error(F'''Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}''' )
raise ValueError(
F'''Not able to read records in the JSON file at {file}. '''
F'''You should probably indicate the field of the JSON file containing your records. '''
F'''This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '''
F'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__magic_name__ )
batch_idx += 1
from __future__ import annotations
A : Dict = "#"
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Dict ) -> None:
SCREAMING_SNAKE_CASE_ = {}
def __A ( self : List[Any] , __magic_name__ : str ) -> None:
SCREAMING_SNAKE_CASE_ = self._trie
for char in text:
if char not in trie:
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = trie[char]
SCREAMING_SNAKE_CASE_ = True
def __A ( self : Union[str, Any] , __magic_name__ : str ) -> tuple | list:
SCREAMING_SNAKE_CASE_ = self._trie
for char in prefix:
if char in trie:
SCREAMING_SNAKE_CASE_ = trie[char]
else:
return []
return self._elements(__magic_name__ )
def __A ( self : int , __magic_name__ : dict ) -> tuple:
SCREAMING_SNAKE_CASE_ = []
for c, v in d.items():
SCREAMING_SNAKE_CASE_ = [" "] if c == END else [(c + s) for s in self._elements(__magic_name__ )]
result.extend(__magic_name__ )
return tuple(__magic_name__ )
A : Union[str, Any] = Trie()
A : Optional[int] = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = trie.find_word(__UpperCamelCase )
return tuple(string + word for word in suffixes )
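# Illustrative example: autocomplete_using_trie("de") returns
# ("depart ", "detergent ", "deer ", "deal ") -- each completion carries a
# trailing space because the end-of-word marker "#" is rendered as " ".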
def a__ ( ):
print(autocomplete_using_trie("de" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
A : Tuple = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def a__ ( __UpperCamelCase ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if args.student_type == "roberta":
SCREAMING_SNAKE_CASE_ = False
elif args.student_type == "gpt2":
SCREAMING_SNAKE_CASE_ = False
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if args.student_type == "roberta":
SCREAMING_SNAKE_CASE_ = False
def a__ ( ):
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser(description="Training" )
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=__UpperCamelCase , required=__UpperCamelCase , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=__UpperCamelCase , required=__UpperCamelCase , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=__UpperCamelCase , choices=["distilbert", "roberta", "gpt2"] , required=__UpperCamelCase , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=__UpperCamelCase , required=__UpperCamelCase , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=__UpperCamelCase , type=__UpperCamelCase , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=__UpperCamelCase , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=__UpperCamelCase , required=__UpperCamelCase , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=__UpperCamelCase , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=__UpperCamelCase , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=__UpperCamelCase , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=__UpperCamelCase , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=__UpperCamelCase , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=__UpperCamelCase , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=__UpperCamelCase , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=__UpperCamelCase , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=__UpperCamelCase , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=__UpperCamelCase , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=__UpperCamelCase , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=__UpperCamelCase , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=__UpperCamelCase , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=__UpperCamelCase , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=__UpperCamelCase , default=5_0 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=__UpperCamelCase , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=__UpperCamelCase , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5E-4 , type=__UpperCamelCase , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1E-6 , type=__UpperCamelCase , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=__UpperCamelCase , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=__UpperCamelCase , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=__UpperCamelCase , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=__UpperCamelCase , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=__UpperCamelCase , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=__UpperCamelCase , default=5_6 , help="Random seed" )
parser.add_argument("--log_interval" , type=__UpperCamelCase , default=5_0_0 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=__UpperCamelCase , default=4_0_0_0 , help="Checkpoint interval." )
SCREAMING_SNAKE_CASE_ = parser.parse_args()
sanity_checks(__UpperCamelCase )
# ARGS #
init_gpu_params(__UpperCamelCase )
set_seed(__UpperCamelCase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
" it. Use `--force` if you want to overwrite it." )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
json.dump(vars(__UpperCamelCase ) , __UpperCamelCase , indent=4 )
git_log(args.dump_path )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = MODEL_CLASSES[args.student_type]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
SCREAMING_SNAKE_CASE_ = teacher_tokenizer_class.from_pretrained(args.teacher_name )
SCREAMING_SNAKE_CASE_ = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
SCREAMING_SNAKE_CASE_ = tokenizer.all_special_tokens.index(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
SCREAMING_SNAKE_CASE_ = special_tok_ids
SCREAMING_SNAKE_CASE_ = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , "rb" ) as fp:
SCREAMING_SNAKE_CASE_ = pickle.load(__UpperCamelCase )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , "rb" ) as fp:
SCREAMING_SNAKE_CASE_ = pickle.load(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = np.maximum(__UpperCamelCase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
SCREAMING_SNAKE_CASE_ = 0.0 # do not predict special tokens
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__UpperCamelCase )
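# token_probs now up-weights rare tokens for masking, with weight_i
# proportional to max(count_i, 1) ** (-mlm_smoothing) and special tokens
# zeroed out so they are never selected for prediction.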
else:
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = LmSeqsDataset(params=__UpperCamelCase , data=__UpperCamelCase )
logger.info("Data loader created." )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
SCREAMING_SNAKE_CASE_ = student_config_class.from_pretrained(args.student_config )
SCREAMING_SNAKE_CASE_ = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
SCREAMING_SNAKE_CASE_ = student_model_class.from_pretrained(args.student_pretrained_weights , config=__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE_ = student_model_class(__UpperCamelCase )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info("Student loaded." )
# TEACHER #
SCREAMING_SNAKE_CASE_ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__UpperCamelCase )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(__UpperCamelCase , __UpperCamelCase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(__UpperCamelCase , __UpperCamelCase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE_ = Distiller(
params=__UpperCamelCase , dataset=__UpperCamelCase , token_probs=__UpperCamelCase , student=__UpperCamelCase , teacher=__UpperCamelCase )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
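# Invocation sketch: the flags below all correspond to `args.*` attributes referenced
# in this script; the concrete paths and model names are illustrative placeholders.
#
#   python train.py \
#       --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --dump_path serialization_dir/my_distillation --force \
#       --data_file data/binarized_text.pickle --token_counts data/token_counts.pickle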
| 305
|
from collections import deque
class lowerCamelCase :
"""simple docstring"""
def __init__( self : str , __magic_name__ : str , __magic_name__ : int , __magic_name__ : int ) -> None:
SCREAMING_SNAKE_CASE_ = process_name # process name
SCREAMING_SNAKE_CASE_ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
SCREAMING_SNAKE_CASE_ = arrival_time
SCREAMING_SNAKE_CASE_ = burst_time # remaining burst time
SCREAMING_SNAKE_CASE_ = 0 # total time of the process wait in ready queue
SCREAMING_SNAKE_CASE_ = 0 # time from arrival time to completion time
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Tuple , __magic_name__ : int , __magic_name__ : list[int] , __magic_name__ : deque[Process] , __magic_name__ : int , ) -> None:
# total number of mlfq's queues
SCREAMING_SNAKE_CASE_ = number_of_queues
# time slice of queues that round robin algorithm applied
SCREAMING_SNAKE_CASE_ = time_slices
# unfinished process is in this ready_queue
SCREAMING_SNAKE_CASE_ = queue
# current time
SCREAMING_SNAKE_CASE_ = current_time
# finished process is in this sequence queue
SCREAMING_SNAKE_CASE_ = deque()
def __A ( self : Dict ) -> list[str]:
SCREAMING_SNAKE_CASE_ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def __A ( self : List[str] , __magic_name__ : list[Process] ) -> list[int]:
SCREAMING_SNAKE_CASE_ = []
for i in range(len(__magic_name__ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def __A ( self : List[str] , __magic_name__ : list[Process] ) -> list[int]:
SCREAMING_SNAKE_CASE_ = []
for i in range(len(__magic_name__ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def __A ( self : Tuple , __magic_name__ : list[Process] ) -> list[int]:
SCREAMING_SNAKE_CASE_ = []
for i in range(len(__magic_name__ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def __A ( self : str , __magic_name__ : deque[Process] ) -> list[int]:
return [q.burst_time for q in queue]
def __A ( self : Optional[Any] , __magic_name__ : Process ) -> int:
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def __A ( self : Optional[Any] , __magic_name__ : deque[Process] ) -> deque[Process]:
SCREAMING_SNAKE_CASE_ = deque() # sequence deque of finished process
while len(__magic_name__ ) != 0:
SCREAMING_SNAKE_CASE_ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of current process
self.update_waiting_time(__magic_name__ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
SCREAMING_SNAKE_CASE_ = 0
# set the process's turnaround time because it is finished
SCREAMING_SNAKE_CASE_ = self.current_time - cp.arrival_time
# set the completion time
SCREAMING_SNAKE_CASE_ = self.current_time
# add the process to queue that has finished queue
finished.append(__magic_name__ )
self.finish_queue.extend(__magic_name__ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def __A ( self : Any , __magic_name__ : deque[Process] , __magic_name__ : int ) -> tuple[deque[Process], deque[Process]]:
SCREAMING_SNAKE_CASE_ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(__magic_name__ ) ):
SCREAMING_SNAKE_CASE_ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(__magic_name__ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
SCREAMING_SNAKE_CASE_ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(__magic_name__ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
SCREAMING_SNAKE_CASE_ = 0
# set the finish time
SCREAMING_SNAKE_CASE_ = self.current_time
# update the process' turnaround time because it is finished
SCREAMING_SNAKE_CASE_ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(__magic_name__ )
self.finish_queue.extend(__magic_name__ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def __A ( self : Any ) -> deque[Process]:
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
A : Dict = Process("P1", 0, 53)
A : str = Process("P2", 0, 17)
A : List[Any] = Process("P3", 0, 68)
A : List[str] = Process("P4", 0, 24)
A : Dict = 3
A : Any = [17, 25]
    A : Dict = deque([P1, P2, P3, P4])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})
A : Union[str, Any] = Process("P1", 0, 53)
A : Any = Process("P2", 0, 17)
A : Dict = Process("P3", 0, 68)
A : List[str] = Process("P4", 0, 24)
A : Optional[int] = 3
A : int = [17, 25]
    A : Union[str, Any] = deque([P1, P2, P3, P4])
A : Tuple = MLFQ(number_of_queues, time_slices, queue, 0)
A : Tuple = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"
)
# print sequence of finished processes
print(
f"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
)
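# Expected results sketch for the workload above (P1..P4 arrive at t=0 with bursts
# 53/17/68/24; two round-robin queues with slices 17 and 25, then FCFS). Values come
# from a hand simulation of this scheduler, so treat them as indicative:
#   waiting times:    [83, 17, 94, 101]
#   completion times: [136, 34, 162, 125]
#   turnaround times: [136, 34, 162, 125]
#   finish sequence:  ['P2', 'P4', 'P1', 'P3']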
| 305
| 1
|
from __future__ import annotations
import numpy as np
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = np.shape(__UpperCamelCase )
if rows != columns:
SCREAMING_SNAKE_CASE_ = (
"'table' has to be of square shaped array but got a "
F'''{rows}x{columns} array:\n{table}'''
)
raise ValueError(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = np.zeros((rows, columns) )
SCREAMING_SNAKE_CASE_ = np.zeros((rows, columns) )
for i in range(__UpperCamelCase ):
for j in range(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = sum(lower[i][k] * upper[k][j] for k in range(__UpperCamelCase ) )
if upper[j][j] == 0:
raise ArithmeticError("No LU decomposition exists" )
SCREAMING_SNAKE_CASE_ = (table[i][j] - total) / upper[j][j]
SCREAMING_SNAKE_CASE_ = 1
for j in range(__UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = sum(lower[i][k] * upper[k][j] for k in range(__UpperCamelCase ) )
SCREAMING_SNAKE_CASE_ = table[i][j] - total
return lower, upper
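# Minimal check sketch (the `a__` routine above performs the Doolittle decomposition):
# for a matrix that needs no pivoting, `lower @ upper` reproduces the input.
#   >>> m = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
#   >>> lower, upper = a__(m)
#   >>> bool(np.allclose(lower @ upper, m))
#   True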
if __name__ == "__main__":
import doctest
doctest.testmod()
| 305
|
import torch
def a__ ( ):
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE_ = torch.cuda.device_count()
else:
SCREAMING_SNAKE_CASE_ = 0
print(F'''Successfully ran on {num_gpus} GPUs''' )
if __name__ == "__main__":
main()
| 305
| 1
|
from collections.abc import Generator
from math import sin
def a__ ( __UpperCamelCase ):
if len(__UpperCamelCase ) != 3_2:
raise ValueError("Input must be of length 32" )
SCREAMING_SNAKE_CASE_ = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def a__ ( __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
SCREAMING_SNAKE_CASE_ = format(__UpperCamelCase , "08x" )[-8:]
SCREAMING_SNAKE_CASE_ = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = b""
for char in message:
bit_string += format(__UpperCamelCase , "08b" ).encode("utf-8" )
SCREAMING_SNAKE_CASE_ = format(len(__UpperCamelCase ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__UpperCamelCase ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def a__ ( __UpperCamelCase ):
if len(__UpperCamelCase ) % 5_1_2 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__UpperCamelCase ) , 5_1_2 ):
SCREAMING_SNAKE_CASE_ = bit_string[pos : pos + 5_1_2]
SCREAMING_SNAKE_CASE_ = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def a__ ( __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
SCREAMING_SNAKE_CASE_ = format(__UpperCamelCase , "032b" )
SCREAMING_SNAKE_CASE_ = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__UpperCamelCase , 2 )
def a__ ( __UpperCamelCase , __UpperCamelCase ):
return (a + b) % 2**3_2
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = preprocess(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
SCREAMING_SNAKE_CASE_ = 0X67452301
SCREAMING_SNAKE_CASE_ = 0Xefcdab89
SCREAMING_SNAKE_CASE_ = 0X98badcfe
SCREAMING_SNAKE_CASE_ = 0X10325476
SCREAMING_SNAKE_CASE_ = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = aa
SCREAMING_SNAKE_CASE_ = ba
SCREAMING_SNAKE_CASE_ = ca
SCREAMING_SNAKE_CASE_ = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
SCREAMING_SNAKE_CASE_ = d ^ (b & (c ^ d))
SCREAMING_SNAKE_CASE_ = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
SCREAMING_SNAKE_CASE_ = c ^ (d & (b ^ c))
SCREAMING_SNAKE_CASE_ = (5 * i + 1) % 1_6
elif i <= 4_7:
SCREAMING_SNAKE_CASE_ = b ^ c ^ d
SCREAMING_SNAKE_CASE_ = (3 * i + 5) % 1_6
else:
SCREAMING_SNAKE_CASE_ = c ^ (b | not_aa(__UpperCamelCase ))
SCREAMING_SNAKE_CASE_ = (7 * i) % 1_6
SCREAMING_SNAKE_CASE_ = (f + a + added_consts[i] + block_words[g]) % 2**3_2
SCREAMING_SNAKE_CASE_ = d
SCREAMING_SNAKE_CASE_ = c
SCREAMING_SNAKE_CASE_ = b
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , left_rotate_aa(__UpperCamelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
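# Known-answer sketch: the final `a__` above computes the digest and returns the hex
# string as bytes, so the classic RFC 1321 test vectors apply.
#   >>> a__(b"")
#   b'd41d8cd98f00b204e9800998ecf8427e'
#   >>> a__(b"The quick brown fox jumps over the lazy dog")
#   b'9e107d9d372bb6826bd81d3542a419d6'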
| 305
|
from collections.abc import Generator
from math import sin
def a__ ( __UpperCamelCase ):
if len(__UpperCamelCase ) != 3_2:
raise ValueError("Input must be of length 32" )
SCREAMING_SNAKE_CASE_ = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def a__ ( __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
SCREAMING_SNAKE_CASE_ = format(__UpperCamelCase , "08x" )[-8:]
SCREAMING_SNAKE_CASE_ = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = b""
for char in message:
bit_string += format(__UpperCamelCase , "08b" ).encode("utf-8" )
SCREAMING_SNAKE_CASE_ = format(len(__UpperCamelCase ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__UpperCamelCase ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def a__ ( __UpperCamelCase ):
if len(__UpperCamelCase ) % 5_1_2 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__UpperCamelCase ) , 5_1_2 ):
SCREAMING_SNAKE_CASE_ = bit_string[pos : pos + 5_1_2]
SCREAMING_SNAKE_CASE_ = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def a__ ( __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
SCREAMING_SNAKE_CASE_ = format(__UpperCamelCase , "032b" )
SCREAMING_SNAKE_CASE_ = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__UpperCamelCase , 2 )
def a__ ( __UpperCamelCase , __UpperCamelCase ):
return (a + b) % 2**3_2
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = preprocess(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
SCREAMING_SNAKE_CASE_ = 0X67452301
SCREAMING_SNAKE_CASE_ = 0Xefcdab89
SCREAMING_SNAKE_CASE_ = 0X98badcfe
SCREAMING_SNAKE_CASE_ = 0X10325476
SCREAMING_SNAKE_CASE_ = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = aa
SCREAMING_SNAKE_CASE_ = ba
SCREAMING_SNAKE_CASE_ = ca
SCREAMING_SNAKE_CASE_ = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
SCREAMING_SNAKE_CASE_ = d ^ (b & (c ^ d))
SCREAMING_SNAKE_CASE_ = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
SCREAMING_SNAKE_CASE_ = c ^ (d & (b ^ c))
SCREAMING_SNAKE_CASE_ = (5 * i + 1) % 1_6
elif i <= 4_7:
SCREAMING_SNAKE_CASE_ = b ^ c ^ d
SCREAMING_SNAKE_CASE_ = (3 * i + 5) % 1_6
else:
SCREAMING_SNAKE_CASE_ = c ^ (b | not_aa(__UpperCamelCase ))
SCREAMING_SNAKE_CASE_ = (7 * i) % 1_6
SCREAMING_SNAKE_CASE_ = (f + a + added_consts[i] + block_words[g]) % 2**3_2
SCREAMING_SNAKE_CASE_ = d
SCREAMING_SNAKE_CASE_ = c
SCREAMING_SNAKE_CASE_ = b
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , left_rotate_aa(__UpperCamelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 305
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Tuple = logging.get_logger(__name__)
A : List[str] = {
"google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
"google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''mobilenet_v1'''
def __init__( self : List[Any] , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=224 , __magic_name__ : Optional[Any]=1.0 , __magic_name__ : List[str]=8 , __magic_name__ : Any="relu6" , __magic_name__ : List[Any]=True , __magic_name__ : Any=0.999 , __magic_name__ : Any=0.02 , __magic_name__ : Dict=0.001 , **__magic_name__ : List[Any] , ) -> Optional[Any]:
super().__init__(**__magic_name__ )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = depth_multiplier
SCREAMING_SNAKE_CASE_ = min_depth
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = tf_padding
SCREAMING_SNAKE_CASE_ = classifier_dropout_prob
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __A ( self : Any ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def __A ( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def __A ( self : Optional[Any] ) -> float:
return 1e-4
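# Instantiation sketch (mirrors the public transformers API these classes follow;
# the argument values are illustrative):
#   from transformers import MobileNetV1Config
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)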
| 305
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : int ) -> Any:
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = BlipImageProcessor()
SCREAMING_SNAKE_CASE_ = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
SCREAMING_SNAKE_CASE_ = BlipaProcessor(__magic_name__ , __magic_name__ )
processor.save_pretrained(self.tmpdirname )
def __A ( self : str , **__magic_name__ : int ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer
def __A ( self : Dict , **__magic_name__ : List[Any] ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor
def __A ( self : int ) -> Any:
shutil.rmtree(self.tmpdirname )
def __A ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE_ = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
SCREAMING_SNAKE_CASE_ = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
SCREAMING_SNAKE_CASE_ = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def __A ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = image_processor(__magic_name__ , return_tensors="np" )
SCREAMING_SNAKE_CASE_ = processor(images=__magic_name__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def __A ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ = processor.batch_decode(__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def __A ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 305
| 1
|
A : Dict = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
A : List[Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
order.append(__UpperCamelCase )
return order
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return component
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = len(__UpperCamelCase ) * [False]
SCREAMING_SNAKE_CASE_ = {vert: [] for vert in range(len(__UpperCamelCase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = []
for i, was_visited in enumerate(__UpperCamelCase ):
if not was_visited:
order += topology_sort(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = len(__UpperCamelCase ) * [False]
for i in range(len(__UpperCamelCase ) ):
SCREAMING_SNAKE_CASE_ = order[len(__UpperCamelCase ) - i - 1]
if not visited[vert]:
SCREAMING_SNAKE_CASE_ = find_components(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
components_list.append(__UpperCamelCase )
return components_list
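# Result sketch for the first test graph above: the cycle 0 -> 2 -> 1 -> 0 forms one
# strongly connected component, while 3 and 4 are singletons, so the components are
# {0, 1, 2}, {3} and {4} (list order depends on the finishing order of the DFS).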
| 305
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
A : List[Any] = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = ["DPTFeatureExtractor"]
A : str = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
A : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
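# Usage sketch: with the lazy-module wiring above, a consumer can still write
# `from transformers.models.dpt import DPTImageProcessor, DPTForDepthEstimation`;
# the heavy submodules are only imported on first attribute access.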
| 305
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073],
"image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , __magic_name__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__magic_name__ , __magic_name__ )
def __A ( self : Optional[int] , **__magic_name__ : Tuple ) -> Dict:
return BertTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def __A ( self : Union[str, Any] , **__magic_name__ : Dict ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__magic_name__ )
def __A ( self : Any , **__magic_name__ : Tuple ) -> Dict:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__magic_name__ )
def __A ( self : Any ) -> int:
shutil.rmtree(self.tmpdirname )
def __A ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE_ = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__magic_name__ )
SCREAMING_SNAKE_CASE_ = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __magic_name__ )
self.assertIsInstance(processor_fast.tokenizer , __magic_name__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __magic_name__ )
self.assertIsInstance(processor_fast.image_processor , __magic_name__ )
def __A ( self : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
SCREAMING_SNAKE_CASE_ = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
SCREAMING_SNAKE_CASE_ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def __A ( self : str ) -> Any:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = image_processor(__magic_name__ , return_tensors="np" )
SCREAMING_SNAKE_CASE_ = processor(images=__magic_name__ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ , padding="max_length" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def __A ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ = processor.batch_decode(__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def __A ( self : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 305
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def a__ ( __UpperCamelCase ):
return "".join(sorted(__UpperCamelCase ) )
def a__ ( __UpperCamelCase ):
return word_by_signature[signature(__UpperCamelCase )]
A : str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
A : int = sorted({word.strip().lower() for word in data.splitlines()})
A : Tuple = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
A : Union[str, Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
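# Lookup sketch: signature("pots") == "opst", so anagram("pots") returns every word
# in words.txt sharing that signature, e.g. ["opts", "post", "pots", "spot", "stop",
# "tops"] (the exact list depends on the bundled word file).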
| 305
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
A : str = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Any = ["BeitFeatureExtractor"]
A : Dict = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
A : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 305
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : int = logging.get_logger(__name__)
A : str = {
"kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''align_text_model'''
def __init__( self : Optional[Any] , __magic_name__ : Union[str, Any]=30_522 , __magic_name__ : Tuple=768 , __magic_name__ : List[str]=12 , __magic_name__ : Optional[Any]=12 , __magic_name__ : str=3_072 , __magic_name__ : Dict="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : List[str]=512 , __magic_name__ : Any=2 , __magic_name__ : Optional[Any]=0.02 , __magic_name__ : int=1e-12 , __magic_name__ : str=0 , __magic_name__ : Optional[Any]="absolute" , __magic_name__ : Optional[Any]=True , **__magic_name__ : Tuple , ) -> Union[str, Any]:
super().__init__(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = position_embedding_type
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = pad_token_id
@classmethod
def __A ( cls : Any , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : Optional[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
SCREAMING_SNAKE_CASE_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''align_vision_model'''
def __init__( self : List[str] , __magic_name__ : int = 3 , __magic_name__ : int = 600 , __magic_name__ : float = 2.0 , __magic_name__ : float = 3.1 , __magic_name__ : int = 8 , __magic_name__ : List[int] = [3, 3, 5, 3, 5, 5, 3] , __magic_name__ : List[int] = [32, 16, 24, 40, 80, 112, 192] , __magic_name__ : List[int] = [16, 24, 40, 80, 112, 192, 320] , __magic_name__ : List[int] = [] , __magic_name__ : List[int] = [1, 2, 2, 2, 1, 2, 1] , __magic_name__ : List[int] = [1, 2, 2, 3, 3, 4, 1] , __magic_name__ : List[int] = [1, 6, 6, 6, 6, 6, 6] , __magic_name__ : float = 0.25 , __magic_name__ : str = "swish" , __magic_name__ : int = 2_560 , __magic_name__ : str = "mean" , __magic_name__ : float = 0.02 , __magic_name__ : float = 0.001 , __magic_name__ : float = 0.99 , __magic_name__ : float = 0.2 , **__magic_name__ : List[Any] , ) -> Tuple:
super().__init__(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = width_coefficient
SCREAMING_SNAKE_CASE_ = depth_coefficient
SCREAMING_SNAKE_CASE_ = depth_divisor
SCREAMING_SNAKE_CASE_ = kernel_sizes
SCREAMING_SNAKE_CASE_ = in_channels
SCREAMING_SNAKE_CASE_ = out_channels
SCREAMING_SNAKE_CASE_ = depthwise_padding
SCREAMING_SNAKE_CASE_ = strides
SCREAMING_SNAKE_CASE_ = num_block_repeats
SCREAMING_SNAKE_CASE_ = expand_ratios
SCREAMING_SNAKE_CASE_ = squeeze_expansion_ratio
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dim
SCREAMING_SNAKE_CASE_ = pooling_type
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = batch_norm_eps
SCREAMING_SNAKE_CASE_ = batch_norm_momentum
SCREAMING_SNAKE_CASE_ = drop_connect_rate
SCREAMING_SNAKE_CASE_ = sum(__magic_name__ ) * 4
@classmethod
def __A ( cls : List[str] , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : Dict ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
SCREAMING_SNAKE_CASE_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''align'''
lowerCamelCase__ = True
def __init__( self : Optional[Any] , __magic_name__ : Dict=None , __magic_name__ : List[Any]=None , __magic_name__ : str=640 , __magic_name__ : Any=1.0 , __magic_name__ : Dict=0.02 , **__magic_name__ : Union[str, Any] , ) -> int:
super().__init__(**__magic_name__ )
if text_config is None:
SCREAMING_SNAKE_CASE_ = {}
logger.info("text_config is None. Initializing the AlignTextConfig with default values." )
if vision_config is None:
SCREAMING_SNAKE_CASE_ = {}
logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )
SCREAMING_SNAKE_CASE_ = AlignTextConfig(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = AlignVisionConfig(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = projection_dim
SCREAMING_SNAKE_CASE_ = temperature_init_value
SCREAMING_SNAKE_CASE_ = initializer_range
@classmethod
def __A ( cls : List[str] , __magic_name__ : AlignTextConfig , __magic_name__ : AlignVisionConfig , **__magic_name__ : Tuple ) -> Any:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__magic_name__ )
def __A ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.text_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.__class__.model_type
return output
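# Composition sketch (mirrors the transformers API these classes follow): the
# classmethod above builds the joint config from its two halves, e.g.
#   config = AlignConfig.from_text_vision_configs(AlignTextConfig(), AlignVisionConfig())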
| 305
| 1
|
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : List[str] = logging.get_logger(__name__)
A : Union[str, Any] = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''data2vec-audio'''
def __init__( self : Any , __magic_name__ : List[str]=32 , __magic_name__ : Tuple=768 , __magic_name__ : str=12 , __magic_name__ : Tuple=12 , __magic_name__ : Tuple=3_072 , __magic_name__ : str="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : str=0.1 , __magic_name__ : List[Any]=0.1 , __magic_name__ : Union[str, Any]=0.0 , __magic_name__ : List[str]=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : List[Any]=0.02 , __magic_name__ : List[str]=1e-5 , __magic_name__ : Union[str, Any]="gelu" , __magic_name__ : int=(512, 512, 512, 512, 512, 512, 512) , __magic_name__ : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __magic_name__ : List[Any]=(10, 3, 3, 3, 3, 2, 2) , __magic_name__ : Tuple=False , __magic_name__ : Optional[int]=16 , __magic_name__ : List[Any]=19 , __magic_name__ : Optional[Any]=5 , __magic_name__ : str=0.05 , __magic_name__ : str=10 , __magic_name__ : Dict=2 , __magic_name__ : int=0.0 , __magic_name__ : Optional[int]=10 , __magic_name__ : List[str]=0 , __magic_name__ : List[Any]="sum" , __magic_name__ : Union[str, Any]=False , __magic_name__ : Optional[int]=False , __magic_name__ : str=256 , __magic_name__ : Dict=(512, 512, 512, 512, 1_500) , __magic_name__ : List[Any]=(5, 3, 3, 1, 1) , __magic_name__ : Optional[Any]=(1, 2, 3, 1, 1) , __magic_name__ : int=512 , __magic_name__ : Tuple=0 , __magic_name__ : Optional[int]=1 , __magic_name__ : Optional[int]=2 , __magic_name__ : Dict=False , __magic_name__ : Tuple=3 , __magic_name__ : Tuple=2 , __magic_name__ : str=3 , __magic_name__ : Any=None , **__magic_name__ : Union[str, Any] , ) -> Union[str, Any]:
super().__init__(**__magic_name__ , pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ )
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = feat_extract_activation
SCREAMING_SNAKE_CASE_ = list(__magic_name__ )
SCREAMING_SNAKE_CASE_ = list(__magic_name__ )
SCREAMING_SNAKE_CASE_ = list(__magic_name__ )
SCREAMING_SNAKE_CASE_ = conv_bias
SCREAMING_SNAKE_CASE_ = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE_ = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE_ = conv_pos_kernel_size
SCREAMING_SNAKE_CASE_ = len(self.conv_dim )
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = feat_proj_dropout
SCREAMING_SNAKE_CASE_ = final_dropout
SCREAMING_SNAKE_CASE_ = layerdrop
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE_ = mask_time_prob
SCREAMING_SNAKE_CASE_ = mask_time_length
SCREAMING_SNAKE_CASE_ = mask_time_min_masks
SCREAMING_SNAKE_CASE_ = mask_feature_prob
SCREAMING_SNAKE_CASE_ = mask_feature_length
SCREAMING_SNAKE_CASE_ = mask_feature_min_masks
# ctc loss
SCREAMING_SNAKE_CASE_ = ctc_loss_reduction
SCREAMING_SNAKE_CASE_ = ctc_zero_infinity
# adapter
SCREAMING_SNAKE_CASE_ = add_adapter
SCREAMING_SNAKE_CASE_ = adapter_kernel_size
SCREAMING_SNAKE_CASE_ = adapter_stride
SCREAMING_SNAKE_CASE_ = num_adapter_layers
SCREAMING_SNAKE_CASE_ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE_ = list(__magic_name__ )
SCREAMING_SNAKE_CASE_ = list(__magic_name__ )
SCREAMING_SNAKE_CASE_ = list(__magic_name__ )
SCREAMING_SNAKE_CASE_ = xvector_output_dim
@property
def __A ( self : Tuple ) -> Optional[int]:
return math.prod(self.conv_stride )
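# With the default strides (5, 2, 2, 2, 2, 2, 2), the property above evaluates to
# math.prod((5, 2, 2, 2, 2, 2, 2)) == 320, i.e. the feature extractor emits one
# frame per 320 input samples.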
| 305
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def a__ ( __UpperCamelCase ):
return x + 2
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = "x = 3"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3} )
SCREAMING_SNAKE_CASE_ = "x = y"
SCREAMING_SNAKE_CASE_ = {"y": 5}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 5, "y": 5} )
def __A ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE_ = "y = add_two(x)"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result is None
assert "tried to execute add_two" in out.out
def __A ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE_ = "x = 3"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3} )
def __A ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def __A ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = "x = 3\ny = 5"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
def __A ( self : Any ) -> List[str]:
SCREAMING_SNAKE_CASE_ = "text = f'This is x: {x}.'"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__magic_name__ , {"x": 3, "text": "This is x: 3."} )
def __A ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE_ = "if x <= 3:\n y = 2\nelse:\n y = 5"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 2} )
SCREAMING_SNAKE_CASE_ = {"x": 8}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 8, "y": 5} )
def __A ( self : str ) -> str:
SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
self.assertListEqual(__magic_name__ , [3, 5] )
self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} )
def __A ( self : Union[str, Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = "y = x"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 3} )
def __A ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]\ntest_list[1]"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} )
SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def __A ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = "x = 0\nfor i in range(3):\n x = i"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"range": range} , state=__magic_name__ )
assert result == 2
self.assertDictEqual(__magic_name__ , {"x": 2, "i": 2} )
| 305
| 1
|
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 305
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__UpperCamelCase )] )
SCREAMING_SNAKE_CASE_ = np.array(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __UpperCamelCase ) ) , x.transpose() ) , __UpperCamelCase )
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2] )
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = (1, 2, 1)
SCREAMING_SNAKE_CASE_ = (1, 1, 0, 7)
SCREAMING_SNAKE_CASE_ = SARIMAX(
__UpperCamelCase , exog=__UpperCamelCase , order=__UpperCamelCase , seasonal_order=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = model.fit(disp=__UpperCamelCase , maxiter=6_0_0 , method="nm" )
SCREAMING_SNAKE_CASE_ = model_fit.predict(1 , len(__UpperCamelCase ) , exog=[test_match] )
return result[0]
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = regressor.predict(__UpperCamelCase )
return y_pred[0]
def a__ ( __UpperCamelCase ):
train_user.sort()
SCREAMING_SNAKE_CASE_ = np.percentile(__UpperCamelCase , 2_5 )
SCREAMING_SNAKE_CASE_ = np.percentile(__UpperCamelCase , 7_5 )
    SCREAMING_SNAKE_CASE_ = q3 - q1  # interquartile range
    SCREAMING_SNAKE_CASE_ = q1 - (iqr * 0.1)
return low_lim
def a__ ( __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
for i in list_vote:
if i > actual_result:
SCREAMING_SNAKE_CASE_ = not_safe + 1
else:
if abs(abs(__UpperCamelCase ) - abs(__UpperCamelCase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
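# Behaviour sketch: data_safety_checker([2.0, 3.0, 4.0], 5.0) returns False because
# every forecast misses the actual value by more than 0.1; only forecasts that do
# not exceed the actual value and land within 0.1 of it count as safe votes.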
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
A : Dict = [[1_82_31, 0.0, 1], [2_26_21, 1.0, 2], [1_56_75, 0.0, 3], [2_35_83, 1.0, 4]]
A : Optional[Any] = pd.DataFrame(
data_input, columns=["total_user", "total_even", "days"]
)
A : Union[str, Any] = Normalizer().fit_transform(data_input_df.values)
# split data
A : Optional[int] = normalize_df[:, 2].tolist()
A : List[str] = normalize_df[:, 0].tolist()
A : int = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
A : int = normalize_df[:, [1, 2]].tolist()
A : Tuple = x[: len(x) - 1]
A : str = x[len(x) - 1 :]
# for linear regression & sarimax
A : Tuple = total_date[: len(total_date) - 1]
A : Optional[int] = total_user[: len(total_user) - 1]
A : str = total_match[: len(total_match) - 1]
A : List[Any] = total_date[len(total_date) - 1 :]
A : List[Any] = total_user[len(total_user) - 1 :]
A : Optional[Any] = total_match[len(total_match) - 1 :]
# voting system with forecasting
A : Optional[int] = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
A : str = "" if data_safety_checker(res_vote, tst_user) else "not "
print("Today's data is {not_str}safe.")
| 305
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Optional[Any] = logging.get_logger(__name__)
A : Union[str, Any] = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''falcon'''
lowerCamelCase__ = ['''past_key_values''']
def __init__( self : Tuple , __magic_name__ : Optional[int]=65_024 , __magic_name__ : Any=4_544 , __magic_name__ : Optional[int]=32 , __magic_name__ : Tuple=71 , __magic_name__ : str=1e-5 , __magic_name__ : Dict=0.02 , __magic_name__ : List[str]=True , __magic_name__ : Union[str, Any]=0.0 , __magic_name__ : int=0.0 , __magic_name__ : List[str]=None , __magic_name__ : str=False , __magic_name__ : Tuple=False , __magic_name__ : List[str]=True , __magic_name__ : Dict=True , __magic_name__ : Union[str, Any]=False , __magic_name__ : Optional[Any]=11 , __magic_name__ : Dict=11 , **__magic_name__ : List[Any] , ) -> Any:
SCREAMING_SNAKE_CASE_ = vocab_size
# Backward compatibility with n_embed kwarg
SCREAMING_SNAKE_CASE_ = kwargs.pop("n_embed" , __magic_name__ )
SCREAMING_SNAKE_CASE_ = hidden_size if n_embed is None else n_embed
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = layer_norm_epsilon
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = hidden_dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = bos_token_id
SCREAMING_SNAKE_CASE_ = eos_token_id
SCREAMING_SNAKE_CASE_ = num_attention_heads if num_kv_heads is None else num_kv_heads
SCREAMING_SNAKE_CASE_ = alibi
SCREAMING_SNAKE_CASE_ = new_decoder_architecture
SCREAMING_SNAKE_CASE_ = multi_query # Ignored when new_decoder_architecture is True
SCREAMING_SNAKE_CASE_ = parallel_attn
SCREAMING_SNAKE_CASE_ = bias
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
@property
def __A ( self : List[str] ) -> int:
return self.hidden_size // self.num_attention_heads
@property
def __A ( self : Any ) -> List[str]:
return not self.alibi
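# Hedged usage sketch (the upstream class name is FalconConfig; values illustrative):
# config = FalconConfig(vocab_size=65_024, hidden_size=4_544, num_attention_heads=71)
# config.head_dim == 4_544 // 71 == 64, and rotary embeddings are used iff alibi is False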
| 305
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A : List[str] = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Any = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
A : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 305
| 1
|
from math import factorial
A : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def a__ ( __UpperCamelCase ):
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError("Parameter number must be int" )
if number < 0:
raise ValueError("Parameter number must be greater than or equal to 0" )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(__UpperCamelCase ) )
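# e.g. digit_factorial_sum(145) == 1! + 4! + 5! == 145, the well-known fixed point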
def a__ ( __UpperCamelCase = 6_0 , __UpperCamelCase = 1_0_0_0_0_0_0 ):
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError("Parameters chain_length and number_limit must be int" )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
"Parameters chain_length and number_limit must be greater than 0" )
# the counter for the chains with the exact desired length
SCREAMING_SNAKE_CASE_ = 0
# the cached sizes of the previous chains
SCREAMING_SNAKE_CASE_ = {}
for start_chain_element in range(1 , __UpperCamelCase ):
# The temporary set will contain the elements of the chain
SCREAMING_SNAKE_CASE_ = set()
SCREAMING_SNAKE_CASE_ = 0
# Stop computing the chain when you find a cached size, a repeating item or the
    # length is greater than the desired one.
SCREAMING_SNAKE_CASE_ = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(__UpperCamelCase )
chain_set_length += 1
SCREAMING_SNAKE_CASE_ = digit_factorial_sum(__UpperCamelCase )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
SCREAMING_SNAKE_CASE_ = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution()}")
| 305
|
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
A : Union[str, Any] = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@classmethod
def __A ( cls : Any ) -> Dict:
SCREAMING_SNAKE_CASE_ = TOKEN
HfFolder.save_token(__magic_name__ )
@classmethod
def __A ( cls : Optional[int] ) -> Tuple:
try:
delete_repo(token=cls._token , repo_id="test-model-flax" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" )
except HTTPError:
pass
def __A ( self : str ) -> str:
SCREAMING_SNAKE_CASE_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ )
model.push_to_hub("test-model-flax" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="test-model-flax" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__magic_name__ , repo_id="test-model-flax" , push_to_hub=__magic_name__ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' )
def __A ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ )
model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
__magic_name__ , repo_id="valid_org/test-model-flax-org" , push_to_hub=__magic_name__ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' )
def a__ ( __UpperCamelCase , __UpperCamelCase ):
    SCREAMING_SNAKE_CASE_ = True
    SCREAMING_SNAKE_CASE_ = flatten_dict(modela.params )
    SCREAMING_SNAKE_CASE_ = flatten_dict(modelb.params )
    for key in flat_params_a.keys():
        # the two models differ if any parameter tensor deviates by more than 1e-4 in total
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1E-4:
            SCREAMING_SNAKE_CASE_ = False
    return models_are_equal
@require_flax
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE_ = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ )
SCREAMING_SNAKE_CASE_ = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__magic_name__ , __magic_name__ ) )
with self.assertRaises(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertTrue(check_models_equal(__magic_name__ , __magic_name__ ) )
def __A ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ )
SCREAMING_SNAKE_CASE_ = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__magic_name__ , __magic_name__ ) , max_shard_size="10KB" )
with self.assertRaises(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertTrue(check_models_equal(__magic_name__ , __magic_name__ ) )
def __A ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE_ = "bert"
SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-bert-subfolder"
with self.assertRaises(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def __A ( self : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE_ = "bert"
SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
with self.assertRaises(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertIsNotNone(__magic_name__ )
| 305
| 1
|
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
A : int = logging.get_logger(__name__)
A : Dict[Optional[str], Type[Formatter]] = {}
A : Dict[Optional[str], str] = {}
A : Dict[Optional[str], Exception] = {}
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , ):
SCREAMING_SNAKE_CASE_ = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
F'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
SCREAMING_SNAKE_CASE_ = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
F'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
SCREAMING_SNAKE_CASE_ = format_type
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None ):
SCREAMING_SNAKE_CASE_ = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
SCREAMING_SNAKE_CASE_ = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
A : str = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
A : int = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
A : List[Any] = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def a__ ( __UpperCamelCase ):
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def a__ ( __UpperCamelCase , **__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = get_format_type_from_alias(__UpperCamelCase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**__UpperCamelCase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
            F'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got \'{format_type}\'''' )
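# Hedged usage sketch (assumes numpy is installed, matching the registration above):
# get_format_type_from_alias("np") -> "numpy"
# isinstance(get_formatter("np"), NumpyFormatter) -> True
# get_formatter("torch") re-raises the stored ValueError when PyTorch is missing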
| 305
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
A : Union[str, Any] = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def a__ ( ):
SCREAMING_SNAKE_CASE_ = _ask_options(
"In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
SCREAMING_SNAKE_CASE_ = get_sagemaker_input()
else:
SCREAMING_SNAKE_CASE_ = get_cluster_input()
return config
def a__ ( __UpperCamelCase=None ):
if subparsers is not None:
SCREAMING_SNAKE_CASE_ = subparsers.add_parser("config" , description=__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser("Accelerate config command" , description=__UpperCamelCase )
parser.add_argument(
"--config_file" , default=__UpperCamelCase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=__UpperCamelCase )
return parser
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = get_user_input()
if args.config_file is not None:
SCREAMING_SNAKE_CASE_ = args.config_file
else:
if not os.path.isdir(__UpperCamelCase ):
os.makedirs(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = default_yaml_config_file
if config_file.endswith(".json" ):
config.to_json_file(__UpperCamelCase )
else:
config.to_yaml_file(__UpperCamelCase )
print(F'''accelerate configuration saved at {config_file}''' )
def a__ ( ):
SCREAMING_SNAKE_CASE_ = config_command_parser()
SCREAMING_SNAKE_CASE_ = parser.parse_args()
config_command(__UpperCamelCase )
if __name__ == "__main__":
main()
| 305
| 1
|
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = set({"(", "[", "{"} )
SCREAMING_SNAKE_CASE_ = set({")", "]", "}"} )
SCREAMING_SNAKE_CASE_ = {"{": "}", "[": "]", "(": ")"}
for i in range(len(__UpperCamelCase ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(__UpperCamelCase ) == 0 or (len(__UpperCamelCase ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(__UpperCamelCase ) == 0
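# e.g. is_balanced("{[()]}") -> True, while is_balanced("{[(])}") -> False,
# because ']' arrives while '(' is at the top of the stack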
def a__ ( ):
SCREAMING_SNAKE_CASE_ = input("Enter sequence of brackets: " )
if is_balanced(__UpperCamelCase ):
print(__UpperCamelCase , "is balanced" )
else:
print(__UpperCamelCase , "is not balanced" )
if __name__ == "__main__":
main()
| 305
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
lowerCamelCase__ = Features({'''text''': Value('''string''' )} )
lowerCamelCase__ = Features({'''summary''': Value('''string''' )} )
lowerCamelCase__ = "text"
lowerCamelCase__ = "summary"
@property
def __A ( self : Dict ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
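# Hedged usage sketch (the upstream dataclass is datasets.tasks.Summarization):
# Summarization(text_column="document", summary_column="highlights").column_mapping
# -> {"document": "text", "highlights": "summary"}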
| 305
| 1
|
from typing import Dict, Optional
import numpy as np
import datasets
A : Any = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
A : List[Any] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
A : List[str] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False , ):
if label_map is not None:
for old_id, new_id in label_map.items():
SCREAMING_SNAKE_CASE_ = new_id
# turn into Numpy arrays
SCREAMING_SNAKE_CASE_ = np.array(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = np.array(__UpperCamelCase )
if reduce_labels:
        # map background (0) to the ignore value 255 and shift remaining labels down by one
        label[label == 0] = 2_5_5
        SCREAMING_SNAKE_CASE_ = label - 1
        label[label == 2_5_4] = 2_5_5
SCREAMING_SNAKE_CASE_ = label != ignore_index
SCREAMING_SNAKE_CASE_ = np.not_equal(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = pred_label[mask]
SCREAMING_SNAKE_CASE_ = np.array(__UpperCamelCase )[mask]
SCREAMING_SNAKE_CASE_ = pred_label[pred_label == label]
SCREAMING_SNAKE_CASE_ = np.histogram(__UpperCamelCase , bins=__UpperCamelCase , range=(0, num_labels - 1) )[0]
SCREAMING_SNAKE_CASE_ = np.histogram(__UpperCamelCase , bins=__UpperCamelCase , range=(0, num_labels - 1) )[0]
SCREAMING_SNAKE_CASE_ = np.histogram(__UpperCamelCase , bins=__UpperCamelCase , range=(0, num_labels - 1) )[0]
SCREAMING_SNAKE_CASE_ = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False , ):
SCREAMING_SNAKE_CASE_ = np.zeros((num_labels,) , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = np.zeros((num_labels,) , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = np.zeros((num_labels,) , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(__UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = intersect_and_union(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = total_intersect_and_union(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# compute metrics
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = total_area_intersect.sum() / total_area_label.sum()
SCREAMING_SNAKE_CASE_ = total_area_intersect / total_area_union
SCREAMING_SNAKE_CASE_ = total_area_intersect / total_area_label
SCREAMING_SNAKE_CASE_ = np.nanmean(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = np.nanmean(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = all_acc
SCREAMING_SNAKE_CASE_ = iou
SCREAMING_SNAKE_CASE_ = acc
if nan_to_num is not None:
SCREAMING_SNAKE_CASE_ = {metric: np.nan_to_num(__UpperCamelCase , nan=__UpperCamelCase ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase (datasets.Metric ):
"""simple docstring"""
def __A ( self : Any ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
"references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
} ) , reference_urls=[
"https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
] , )
def __A ( self : Any , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : int , __magic_name__ : bool , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[Dict[int, int]] = None , __magic_name__ : bool = False , ) -> int:
SCREAMING_SNAKE_CASE_ = mean_iou(
results=__magic_name__ , gt_seg_maps=__magic_name__ , num_labels=__magic_name__ , ignore_index=__magic_name__ , nan_to_num=__magic_name__ , label_map=__magic_name__ , reduce_labels=__magic_name__ , )
return iou_result
| 305
|
from ....utils import logging
A : List[str] = logging.get_logger(__name__)
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Any=None , __magic_name__ : List[str]=2_048 ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = config.__dict__
SCREAMING_SNAKE_CASE_ = modal_hidden_size
if num_labels:
SCREAMING_SNAKE_CASE_ = num_labels
| 305
| 1
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
A : Dict = logging.get_logger(__name__)
A : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
A : str = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
A : Optional[Any] = {"allegro/herbert-base-cased": 5_14}
A : Optional[Any] = {}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = HerbertTokenizer
def __init__( self : List[str] , __magic_name__ : Union[str, Any]=None , __magic_name__ : List[Any]=None , __magic_name__ : List[Any]=None , __magic_name__ : Tuple="<s>" , __magic_name__ : Tuple="<unk>" , __magic_name__ : Union[str, Any]="<pad>" , __magic_name__ : int="<mask>" , __magic_name__ : List[str]="</s>" , **__magic_name__ : List[Any] , ) -> List[Any]:
super().__init__(
__magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , sep_token=__magic_name__ , **__magic_name__ , )
def __A ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __A ( self : Optional[int] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
if token_ids_a is None:
return [1] + ([0] * len(__magic_name__ )) + [1]
return [1] + ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1]
def __A ( self : Union[str, Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE_ = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
return tuple(__magic_name__ )
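# Hedged usage sketch (upstream name HerbertTokenizerFast; ids illustrative):
# tok = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
# tok.build_inputs_with_special_tokens([5, 6])          -> [cls_id, 5, 6, sep_id]
# tok.create_token_type_ids_from_sequences([5, 6], [7]) -> [0, 0, 0, 0, 1, 1]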
| 305
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = ['''image_processor''', '''tokenizer''']
lowerCamelCase__ = '''ViltImageProcessor'''
lowerCamelCase__ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : Optional[int] , __magic_name__ : str=None , __magic_name__ : List[str]=None , **__magic_name__ : Any ) -> str:
SCREAMING_SNAKE_CASE_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __magic_name__ , )
SCREAMING_SNAKE_CASE_ = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = self.image_processor
def __call__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __magic_name__ : bool = True , __magic_name__ : Union[bool, str, PaddingStrategy] = False , __magic_name__ : Union[bool, str, TruncationStrategy] = None , __magic_name__ : Optional[int] = None , __magic_name__ : int = 0 , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = True , __magic_name__ : Optional[Union[str, TensorType]] = None , **__magic_name__ : str , ) -> BatchEncoding:
SCREAMING_SNAKE_CASE_ = self.tokenizer(
text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_token_type_ids=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
# add pixel_values + pixel_mask
SCREAMING_SNAKE_CASE_ = self.image_processor(__magic_name__ , return_tensors=__magic_name__ )
encoding.update(__magic_name__ )
return encoding
def __A ( self : Optional[int] , *__magic_name__ : List[Any] , **__magic_name__ : Optional[Any] ) -> Any:
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def __A ( self : Dict , *__magic_name__ : List[Any] , **__magic_name__ : Union[str, Any] ) -> str:
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def __A ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __A ( self : Dict ) -> List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __magic_name__ , )
return self.image_processor_class
@property
def __A ( self : int ) -> List[Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __magic_name__ , )
return self.image_processor
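# Hedged usage sketch (upstream name ViltProcessor; checkpoint illustrative):
# processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
# enc = processor(image, "How many cats are there?", return_tensors="pt")
# enc holds input_ids/attention_mask from the tokenizer plus pixel_values/pixel_mask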
| 305
| 1
|
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
A : Optional[List[str]] = None
A : Optional[int] = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
A : List[Any] = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class lowerCamelCase :
"""simple docstring"""
lowerCamelCase__ = True
lowerCamelCase__ = None
# Automatically constructed
lowerCamelCase__ = "PIL.Image.Image"
lowerCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
lowerCamelCase__ = field(default='''Image''' , init=SCREAMING_SNAKE_CASE__ , repr=SCREAMING_SNAKE_CASE__ )
def __call__( self : List[Any] ) -> str:
return self.pa_type
def __A ( self : Optional[Any] , __magic_name__ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = np.array(__magic_name__ )
if isinstance(__magic_name__ , __magic_name__ ):
return {"path": value, "bytes": None}
elif isinstance(__magic_name__ , __magic_name__ ):
return {"path": None, "bytes": value}
elif isinstance(__magic_name__ , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(__magic_name__ )
elif isinstance(__magic_name__ , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(__magic_name__ )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
F'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def __A ( self : List[Any] , __magic_name__ : dict , __magic_name__ : List[str]=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(F'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = PIL.Image.open(__magic_name__ )
else:
SCREAMING_SNAKE_CASE_ = path.split("::" )[-1]
try:
SCREAMING_SNAKE_CASE_ = string_to_dict(__magic_name__ , config.HUB_DATASETS_URL )["repo_id"]
SCREAMING_SNAKE_CASE_ = token_per_repo_id.get(__magic_name__ )
except ValueError:
SCREAMING_SNAKE_CASE_ = None
with xopen(__magic_name__ , "rb" , use_auth_token=__magic_name__ ) as f:
SCREAMING_SNAKE_CASE_ = BytesIO(f.read() )
SCREAMING_SNAKE_CASE_ = PIL.Image.open(bytes_ )
else:
SCREAMING_SNAKE_CASE_ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def __A ( self : str ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def __A ( self : Any , __magic_name__ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
SCREAMING_SNAKE_CASE_ = pa.array([None] * len(__magic_name__ ) , type=pa.binary() )
SCREAMING_SNAKE_CASE_ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
SCREAMING_SNAKE_CASE_ = pa.array([None] * len(__magic_name__ ) , type=pa.string() )
SCREAMING_SNAKE_CASE_ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
SCREAMING_SNAKE_CASE_ = storage.field("bytes" )
else:
SCREAMING_SNAKE_CASE_ = pa.array([None] * len(__magic_name__ ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
SCREAMING_SNAKE_CASE_ = storage.field("path" )
else:
SCREAMING_SNAKE_CASE_ = pa.array([None] * len(__magic_name__ ) , type=pa.string() )
SCREAMING_SNAKE_CASE_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
SCREAMING_SNAKE_CASE_ = pa.array(
[encode_np_array(np.array(__magic_name__ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
SCREAMING_SNAKE_CASE_ = pa.array([None] * len(__magic_name__ ) , type=pa.string() )
SCREAMING_SNAKE_CASE_ = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(__magic_name__ , self.pa_type )
def __A ( self : Any , __magic_name__ : pa.StructArray ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(__magic_name__ : Optional[int] ):
with xopen(__magic_name__ , "rb" ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
return bytes_
SCREAMING_SNAKE_CASE_ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
SCREAMING_SNAKE_CASE_ = pa.array(
[os.path.basename(__magic_name__ ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
SCREAMING_SNAKE_CASE_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(__magic_name__ , self.pa_type )
def a__ ( ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
SCREAMING_SNAKE_CASE_ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = BytesIO()
if image.format in list_image_compression_formats():
SCREAMING_SNAKE_CASE_ = image.format
else:
SCREAMING_SNAKE_CASE_ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(__UpperCamelCase , format=__UpperCamelCase )
return buffer.getvalue()
def a__ ( __UpperCamelCase ):
if hasattr(__UpperCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__UpperCamelCase )}
def a__ ( __UpperCamelCase ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
SCREAMING_SNAKE_CASE_ = array.dtype
SCREAMING_SNAKE_CASE_ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
SCREAMING_SNAKE_CASE_ = dtype.kind
SCREAMING_SNAKE_CASE_ = dtype.itemsize
SCREAMING_SNAKE_CASE_ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
SCREAMING_SNAKE_CASE_ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
SCREAMING_SNAKE_CASE_ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
SCREAMING_SNAKE_CASE_ = dtype_byteorder + dtype_kind + str(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = np.dtype(__UpperCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
SCREAMING_SNAKE_CASE_ = PIL.Image.fromarray(array.astype(__UpperCamelCase ) )
return {"path": None, "bytes": image_to_bytes(__UpperCamelCase )}
def a__ ( __UpperCamelCase ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = first_non_null_value(__UpperCamelCase )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(__UpperCamelCase , np.ndarray ):
SCREAMING_SNAKE_CASE_ = no_op_if_value_is_null(__UpperCamelCase )
return [obj_to_image_dict_func(__UpperCamelCase ) for obj in objs]
elif isinstance(__UpperCamelCase , PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = no_op_if_value_is_null(__UpperCamelCase )
return [obj_to_image_dict_func(__UpperCamelCase ) for obj in objs]
else:
return objs
else:
return objs
| 305
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A : str = logging.get_logger(__name__)
A : Optional[int] = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''table-transformer'''
lowerCamelCase__ = ['''past_key_values''']
lowerCamelCase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : List[Any] , __magic_name__ : Optional[Any]=True , __magic_name__ : Dict=None , __magic_name__ : Any=3 , __magic_name__ : List[str]=100 , __magic_name__ : Union[str, Any]=6 , __magic_name__ : Dict=2_048 , __magic_name__ : str=8 , __magic_name__ : int=6 , __magic_name__ : List[Any]=2_048 , __magic_name__ : Optional[int]=8 , __magic_name__ : Optional[int]=0.0 , __magic_name__ : List[Any]=0.0 , __magic_name__ : Optional[Any]=True , __magic_name__ : List[Any]="relu" , __magic_name__ : List[str]=256 , __magic_name__ : List[str]=0.1 , __magic_name__ : int=0.0 , __magic_name__ : Optional[Any]=0.0 , __magic_name__ : Tuple=0.02 , __magic_name__ : str=1.0 , __magic_name__ : int=False , __magic_name__ : Dict="sine" , __magic_name__ : Union[str, Any]="resnet50" , __magic_name__ : Optional[Any]=True , __magic_name__ : str=False , __magic_name__ : List[str]=1 , __magic_name__ : int=5 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Tuple=1 , __magic_name__ : Optional[int]=1 , __magic_name__ : Optional[Any]=5 , __magic_name__ : Optional[int]=2 , __magic_name__ : Union[str, Any]=0.1 , **__magic_name__ : Tuple , ) -> str:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = backbone_config.get("model_type" )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(__magic_name__ )
# set timm attributes to None
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None, None, None
SCREAMING_SNAKE_CASE_ = use_timm_backbone
SCREAMING_SNAKE_CASE_ = backbone_config
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = num_queries
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = encoder_attention_heads
SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = decoder_attention_heads
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = activation_function
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = init_xavier_std
SCREAMING_SNAKE_CASE_ = encoder_layerdrop
SCREAMING_SNAKE_CASE_ = decoder_layerdrop
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = auxiliary_loss
SCREAMING_SNAKE_CASE_ = position_embedding_type
SCREAMING_SNAKE_CASE_ = backbone
SCREAMING_SNAKE_CASE_ = use_pretrained_backbone
SCREAMING_SNAKE_CASE_ = dilation
# Hungarian matcher
SCREAMING_SNAKE_CASE_ = class_cost
SCREAMING_SNAKE_CASE_ = bbox_cost
SCREAMING_SNAKE_CASE_ = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE_ = mask_loss_coefficient
SCREAMING_SNAKE_CASE_ = dice_loss_coefficient
SCREAMING_SNAKE_CASE_ = bbox_loss_coefficient
SCREAMING_SNAKE_CASE_ = giou_loss_coefficient
SCREAMING_SNAKE_CASE_ = eos_coefficient
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def __A ( self : Union[str, Any] ) -> int:
return self.encoder_attention_heads
@property
def __A ( self : Any ) -> int:
return self.d_model
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __A ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def __A ( self : Any ) -> float:
return 1e-5
@property
def __A ( self : int ) -> int:
return 12
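# Summary of the ONNX config above: it declares inputs pixel_values
# (batch, num_channels, height, width) and pixel_mask (batch), an absolute
# validation tolerance of 1e-5, and a default ONNX opset of 12.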
| 305
| 1
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = StableDiffusionDiffEditPipeline
lowerCamelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
lowerCamelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
lowerCamelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCamelCase__ = frozenset([] )
def __A ( self : Dict ) -> str:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__magic_name__ , )
SCREAMING_SNAKE_CASE_ = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , )
SCREAMING_SNAKE_CASE_ = DDIMInverseScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__magic_name__ , set_alpha_to_zero=__magic_name__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
SCREAMING_SNAKE_CASE_ = CLIPTextModel(__magic_name__ )
SCREAMING_SNAKE_CASE_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE_ = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __A ( self : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict=0 ) -> Dict:
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 16, 16) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
if str(__magic_name__ ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(__magic_name__ )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
SCREAMING_SNAKE_CASE_ = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __A ( self : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Optional[Any]=0 ) -> List[str]:
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE_ = Image.fromarray(np.uinta(__magic_name__ ) ).convert("RGB" )
if str(__magic_name__ ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(__magic_name__ )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
SCREAMING_SNAKE_CASE_ = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __A ( self : Any , __magic_name__ : Optional[int] , __magic_name__ : Any=0 ) -> List[str]:
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE_ = Image.fromarray(np.uinta(__magic_name__ ) ).convert("RGB" )
if str(__magic_name__ ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(__magic_name__ )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
SCREAMING_SNAKE_CASE_ = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def __A ( self : int ) -> List[Any]:
if not hasattr(self.pipeline_class , "_optional_components" ):
return
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = self.pipeline_class(**__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__magic_name__ , __magic_name__ , __magic_name__ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(__magic_name__ )
SCREAMING_SNAKE_CASE_ = pipe(**__magic_name__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.pipeline_class.from_pretrained(__magic_name__ )
pipe_loaded.to(__magic_name__ )
pipe_loaded.set_progress_bar_config(disable=__magic_name__ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__magic_name__ , __magic_name__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(__magic_name__ )
SCREAMING_SNAKE_CASE_ = pipe_loaded(**__magic_name__ )[0]
SCREAMING_SNAKE_CASE_ = np.abs(output - output_loaded ).max()
self.assertLess(__magic_name__ , 1e-4 )
def __A ( self : Tuple ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = "cpu"
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = self.pipeline_class(**__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_mask_inputs(__magic_name__ )
SCREAMING_SNAKE_CASE_ = pipe.generate_mask(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
SCREAMING_SNAKE_CASE_ = np.array([0] * 9 )
SCREAMING_SNAKE_CASE_ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__magic_name__ , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def __A ( self : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = "cpu"
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = self.pipeline_class(**__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inversion_inputs(__magic_name__ )
SCREAMING_SNAKE_CASE_ = pipe.invert(**__magic_name__ ).images
SCREAMING_SNAKE_CASE_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE_ = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
SCREAMING_SNAKE_CASE_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__magic_name__ , 1e-3 )
def __A ( self : Union[str, Any] ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def __A ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE_ = "cpu"
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = {"beta_start": 0.0_0085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
SCREAMING_SNAKE_CASE_ = DPMSolverMultistepScheduler(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = DPMSolverMultistepInverseScheduler(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.pipeline_class(**__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inversion_inputs(__magic_name__ )
SCREAMING_SNAKE_CASE_ = pipe.invert(**__magic_name__ ).images
SCREAMING_SNAKE_CASE_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE_ = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
SCREAMING_SNAKE_CASE_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__magic_name__ , 1e-3 )
@require_torch_gpu
@slow
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[Any] ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __A ( cls : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
SCREAMING_SNAKE_CASE_ = raw_image.convert("RGB" ).resize((768, 768) )
SCREAMING_SNAKE_CASE_ = raw_image
def __A ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=__magic_name__ , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = DDIMScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "a bowl of fruit"
SCREAMING_SNAKE_CASE_ = "a bowl of pears"
SCREAMING_SNAKE_CASE_ = pipe.generate_mask(
image=self.raw_image , source_prompt=__magic_name__ , target_prompt=__magic_name__ , generator=__magic_name__ , )
SCREAMING_SNAKE_CASE_ = pipe.invert(
prompt=__magic_name__ , image=self.raw_image , inpaint_strength=0.7 , generator=__magic_name__ ).latents
SCREAMING_SNAKE_CASE_ = pipe(
prompt=__magic_name__ , mask_image=__magic_name__ , image_latents=__magic_name__ , generator=__magic_name__ , negative_prompt=__magic_name__ , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
SCREAMING_SNAKE_CASE_ = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def __A ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=__magic_name__ , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "a bowl of fruit"
SCREAMING_SNAKE_CASE_ = "a bowl of pears"
SCREAMING_SNAKE_CASE_ = pipe.generate_mask(
image=self.raw_image , source_prompt=__magic_name__ , target_prompt=__magic_name__ , generator=__magic_name__ , )
SCREAMING_SNAKE_CASE_ = pipe.invert(
prompt=__magic_name__ , image=self.raw_image , inpaint_strength=0.7 , generator=__magic_name__ , num_inference_steps=25 , ).latents
SCREAMING_SNAKE_CASE_ = pipe(
prompt=__magic_name__ , mask_image=__magic_name__ , image_latents=__magic_name__ , generator=__magic_name__ , negative_prompt=__magic_name__ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
SCREAMING_SNAKE_CASE_ = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 305
|
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 305
| 1
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A : str = logging.get_logger(__name__)
A : Union[str, Any] = "▁"
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}
MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1_024,
}
MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
LANGUAGES = {"mustc": MUSTC_LANGS}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = MAX_MODEL_INPUT_SIZES
lowerCamelCase__ = ['''input_ids''', '''attention_mask''']
lowerCamelCase__ = []
def __init__( self : List[str] , __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : List[Any]="<s>" , __magic_name__ : List[str]="</s>" , __magic_name__ : Any="<pad>" , __magic_name__ : Union[str, Any]="<unk>" , __magic_name__ : Tuple=False , __magic_name__ : int=False , __magic_name__ : List[Any]=None , __magic_name__ : Any=None , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Any , ) -> None:
SCREAMING_SNAKE_CASE_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , do_upper_case=__magic_name__ , do_lower_case=__magic_name__ , tgt_lang=__magic_name__ , lang_codes=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
SCREAMING_SNAKE_CASE_ = do_upper_case
SCREAMING_SNAKE_CASE_ = do_lower_case
SCREAMING_SNAKE_CASE_ = load_json(__magic_name__ )
SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ = spm_file
SCREAMING_SNAKE_CASE_ = load_spm(__magic_name__ , self.sp_model_kwargs )
if lang_codes is not None:
SCREAMING_SNAKE_CASE_ = lang_codes
SCREAMING_SNAKE_CASE_ = LANGUAGES[lang_codes]
SCREAMING_SNAKE_CASE_ = [F'''<lang:{lang}>''' for lang in self.langs]
SCREAMING_SNAKE_CASE_ = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''' ) for lang in self.langs}
SCREAMING_SNAKE_CASE_ = self.lang_tokens
SCREAMING_SNAKE_CASE_ = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
SCREAMING_SNAKE_CASE_ = {}
@property
def __A ( self : Tuple ) -> int:
return len(self.encoder )
    @property
    def tgt_lang( self ) -> str:
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang( self , new_tgt_lang ) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang )
    def set_tgt_lang_special_tokens( self , tgt_lang : str ) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
def __A ( self : Union[str, Any] , __magic_name__ : str ) -> List[str]:
return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )
def __A ( self : List[str] , __magic_name__ : str ) -> List[str]:
return self.encoder.get(__magic_name__ , self.encoder[self.unk_token] )
def __A ( self : int , __magic_name__ : int ) -> str:
return self.decoder.get(__magic_name__ , self.unk_token )
def __A ( self : List[Any] , __magic_name__ : List[str] ) -> str:
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
SCREAMING_SNAKE_CASE_ = self.sp_model.decode(__magic_name__ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
SCREAMING_SNAKE_CASE_ = []
else:
current_sub_tokens.append(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.sp_model.decode(__magic_name__ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def __A ( self : Dict , __magic_name__ : Optional[int] , __magic_name__ : Optional[int]=None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
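    # Speech2Text frames every sequence as `<lang:xx> tokens ... </s>`: the
    # target-language code lives in `self.prefix_tokens` and a single EOS id is
    # appended, which the all-ones prefix/suffix masks below mirror.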
def __A ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
SCREAMING_SNAKE_CASE_ = [1] * len(self.prefix_tokens )
SCREAMING_SNAKE_CASE_ = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__magic_name__ )) + suffix_ones
return prefix_ones + ([0] * len(__magic_name__ )) + ([0] * len(__magic_name__ )) + suffix_ones
def __A ( self : Optional[Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE_ = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ = None
return state
def __setstate__( self : str , __magic_name__ : Dict ) -> None:
SCREAMING_SNAKE_CASE_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = load_spm(self.spm_file , self.sp_model_kwargs )
def __A ( self : Tuple , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE_ = Path(__magic_name__ )
assert save_dir.is_dir(), F'''{save_directory} should be a directory'''
SCREAMING_SNAKE_CASE_ = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
SCREAMING_SNAKE_CASE_ = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , __magic_name__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __magic_name__ )
elif not os.path.isfile(self.spm_file ):
with open(__magic_name__ , "wb" ) as fi:
SCREAMING_SNAKE_CASE_ = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (str(__magic_name__ ), str(__magic_name__ ))
def load_spm( path , sp_model_kwargs ):
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path ):
    with open(path , "r" ) as f:
        return json.load(f )
def save_json( data , path ):
    with open(path , "w" ) as f:
        json.dump(data , f , indent=2 )
| 305
|
from __future__ import annotations
import numpy as np
def lower_upper_decomposition( table ):
    rows, columns = np.shape(table )
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            F'''{rows}x{columns} array:\n{table}'''
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists" )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper
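# Worked example (illustrative): for [[2, -2], [4, 1]] Doolittle's method gives
# lower = [[1, 0], [2, 1]] and upper = [[2, -2], [0, 5]], since l21 = 4 / 2 = 2
# and u22 = 1 - (2 * -2) = 5; multiplying lower @ upper recovers the input.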
if __name__ == "__main__":
import doctest
doctest.testmod()
| 305
| 1
|
def binomial_coefficient( n , r ):
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
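# `c` acts as a rolling row of Pascal's triangle: updating j from high to low
# lets c[j] += c[j - 1] run in place with O(r) memory instead of a full n x r
# table. The demo call below prints binomial_coefficient(10, 5) == 252.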
print(binomial_coefficient(n=10, r=5))
| 305
|
from math import pi, sqrt, tan
def surface_area_cube( side_length ):
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values" )
    return 6 * side_length**2
def surface_area_cuboid( length , breadth , height ):
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values" )
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere( radius ):
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values" )
    return 4 * pi * radius**2
def surface_area_hemisphere( radius ):
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values" )
    return 3 * pi * radius**2
def surface_area_cone( radius , height ):
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values" )
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum( radius_1 , radius_2 , height ):
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values" )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder( radius , height ):
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values" )
    return 2 * pi * radius * (height + radius)
def surface_area_torus( torus_radius , tube_radius ):
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values" )
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori" )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def area_rectangle( length , width ):
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values" )
    return length * width
def area_square( side_length ):
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values" )
    return side_length**2
def area_triangle( base , height ):
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values" )
    return (base * height) / 2
def area_triangle_three_sides( side_1 , side_2 , side_3 ):
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values" )
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle" )
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
    return area
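# Heron's formula check (illustrative): for the 5-12-13 right triangle the
# semi-perimeter is 15, so the area is sqrt(15 * 10 * 3 * 2) = 30.0, matching
# the right-triangle result (5 * 12) / 2.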
def area_parallelogram( base , height ):
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values" )
    return base * height
def area_trapezium( base_1 , base_2 , height ):
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values" )
    return 1 / 2 * (base_1 + base_2) * height
def area_circle( radius ):
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values" )
    return pi * radius**2
def area_ellipse( radius_x , radius_y ):
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values" )
    return pi * radius_x * radius_y
def area_rhombus( diagonal_1 , diagonal_2 ):
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values" )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon( sides , length ):
    if not isinstance(sides , int ) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
            equal to three as number of sides" )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
            length of a side" )
    return (sides * length**2) / (4 * tan(pi / sides ))
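# The regular-polygon formula comes from splitting the n-gon into n isosceles
# triangles: each has base `length` and apothem length / (2 * tan(pi / n)),
# so the total area is n * length**2 / (4 * tan(pi / n)).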
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 305
| 1
|
def get_highest_set_bit_position( number ):
    if not isinstance(number , int ):
        raise TypeError("Input value must be an 'int' type" )
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
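# Illustrative check: 25 == 0b11001 has its highest set bit in position 5
# (1-indexed), and the shift loop runs exactly five times, so
# get_highest_set_bit_position(25) == 5 (the same value as (25).bit_length()).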
if __name__ == "__main__":
import doctest
doctest.testmod()
| 305
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
A : List[str] = logging.get_logger(__name__)
A : int = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''blenderbot-small'''
lowerCamelCase__ = ['''past_key_values''']
lowerCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Dict , __magic_name__ : Dict=50_265 , __magic_name__ : str=512 , __magic_name__ : List[Any]=8 , __magic_name__ : Any=2_048 , __magic_name__ : Dict=16 , __magic_name__ : Any=8 , __magic_name__ : Optional[int]=2_048 , __magic_name__ : Dict=16 , __magic_name__ : Tuple=0.0 , __magic_name__ : Dict=0.0 , __magic_name__ : Optional[int]=True , __magic_name__ : Any=True , __magic_name__ : Dict="gelu" , __magic_name__ : Tuple=512 , __magic_name__ : List[str]=0.1 , __magic_name__ : List[Any]=0.0 , __magic_name__ : List[Any]=0.0 , __magic_name__ : Tuple=0.02 , __magic_name__ : Union[str, Any]=1 , __magic_name__ : List[Any]=False , __magic_name__ : str=0 , __magic_name__ : Dict=1 , __magic_name__ : str=2 , __magic_name__ : Union[str, Any]=2 , **__magic_name__ : Optional[Any] , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = encoder_attention_heads
SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = decoder_attention_heads
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = activation_function
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = encoder_layerdrop
SCREAMING_SNAKE_CASE_ = decoder_layerdrop
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , is_encoder_decoder=__magic_name__ , decoder_start_token_id=__magic_name__ , forced_eos_token_id=__magic_name__ , **__magic_name__ , )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def __A ( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE_ = {0: "batch"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "decoder_sequence"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
for i in range(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def __A ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = super().outputs
else:
SCREAMING_SNAKE_CASE_ = super(__magic_name__ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
for i in range(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
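    # With `use_past`, every decoder layer contributes a (key, value) pair whose
    # axis 2 ("past_sequence + sequence") grows by one token per generation
    # step, so it must be declared as a dynamic axis in the exported ONNX graph.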
def __A ( self : int , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE_ = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE_ = dict(**__magic_name__ , **__magic_name__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = common_inputs["input_ids"].shape
SCREAMING_SNAKE_CASE_ = common_inputs["decoder_input_ids"].shape[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_attention_heads
SCREAMING_SNAKE_CASE_ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE_ = decoder_seq_length + 3
SCREAMING_SNAKE_CASE_ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE_ = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__magic_name__ , __magic_name__ )] , dim=1 )
SCREAMING_SNAKE_CASE_ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
SCREAMING_SNAKE_CASE_ = min(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = max(__magic_name__ , __magic_name__ ) - min_num_layers
SCREAMING_SNAKE_CASE_ = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__magic_name__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE_ = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__magic_name__ , __magic_name__ ):
common_inputs["past_key_values"].append((torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) )
return common_inputs
def __A ( self : Union[str, Any] , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE_ = seqlen + 2
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_attention_heads
SCREAMING_SNAKE_CASE_ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE_ = common_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE_ = torch.cat(
[common_inputs["attention_mask"], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
SCREAMING_SNAKE_CASE_ = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(__magic_name__ )
]
return common_inputs
def __A ( self : Dict , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ = tokenizer.num_special_tokens_to_add(__magic_name__ )
SCREAMING_SNAKE_CASE_ = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__magic_name__ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE_ = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE_ = dict(tokenizer(__magic_name__ , return_tensors=__magic_name__ ) )
return common_inputs
def __A ( self : Optional[Any] , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_causal_lm(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
else:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
return common_inputs
def __A ( self : Optional[Any] , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : List[str] ) -> List[str]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = super()._flatten_past_key_values_(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
else:
SCREAMING_SNAKE_CASE_ = super(__magic_name__ , self )._flatten_past_key_values_(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
| 305
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Optional[int] = logging.get_logger(__name__)
A : Optional[Any] = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''roc_bert'''
def __init__( self : Union[str, Any] , __magic_name__ : int=30_522 , __magic_name__ : Optional[int]=768 , __magic_name__ : List[Any]=12 , __magic_name__ : Optional[int]=12 , __magic_name__ : List[str]=3_072 , __magic_name__ : str="gelu" , __magic_name__ : List[str]=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Union[str, Any]=512 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Tuple=0.02 , __magic_name__ : Union[str, Any]=1e-12 , __magic_name__ : Dict=True , __magic_name__ : List[Any]=0 , __magic_name__ : Optional[Any]="absolute" , __magic_name__ : Any=None , __magic_name__ : Any=True , __magic_name__ : List[Any]=True , __magic_name__ : str=768 , __magic_name__ : Tuple=910 , __magic_name__ : Optional[int]=512 , __magic_name__ : Optional[Any]=24_858 , __magic_name__ : str=True , **__magic_name__ : int , ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = enable_pronunciation
SCREAMING_SNAKE_CASE_ = enable_shape
SCREAMING_SNAKE_CASE_ = pronunciation_embed_dim
SCREAMING_SNAKE_CASE_ = pronunciation_vocab_size
SCREAMING_SNAKE_CASE_ = shape_embed_dim
SCREAMING_SNAKE_CASE_ = shape_vocab_size
SCREAMING_SNAKE_CASE_ = concat_input
SCREAMING_SNAKE_CASE_ = position_embedding_type
SCREAMING_SNAKE_CASE_ = classifier_dropout
super().__init__(pad_token_id=__magic_name__ , **__magic_name__ )
| 305
|
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester :
"""simple docstring"""
def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : int=100 , __magic_name__ : Optional[Any]=13 , __magic_name__ : Dict=30 , __magic_name__ : Tuple=2 , __magic_name__ : str=3 , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : Union[str, Any]=32 , __magic_name__ : Optional[int]=4 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Any="gelu" , __magic_name__ : int=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Optional[int]=10 , __magic_name__ : Tuple=0.02 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=None , __magic_name__ : Tuple=[0, 1, 2, 3] , ) -> List[str]:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = 100
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = out_indices
SCREAMING_SNAKE_CASE_ = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_ = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_ = num_patches + 1
def __A ( self : Any ) -> int:
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def __A ( self : Dict ) -> Optional[int]:
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __A ( self : Optional[int] , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = BeitModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : str ) -> int:
SCREAMING_SNAKE_CASE_ = BeitForMaskedImageModeling(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __A ( self : Dict , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_ = BeitForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = BeitForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self : Tuple , __magic_name__ : Any , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : int ) -> int:
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __A ( self : str ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __A ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = BeitModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def __A ( self : Dict ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def __A ( self : List[str] ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __A ( self : str ) -> List[str]:
pass
def __A ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def __A ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def __A ( self : Tuple ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __A ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def __A ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
def __A ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__magic_name__ )
def __A ( self : int ) -> Optional[int]:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__magic_name__ ), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ ).loss
loss.backward()
def __A ( self : Any ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__magic_name__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.gradient_checkpointing_enable()
model.to(__magic_name__ )
model.train()
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ ).loss
loss.backward()
def __A ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = _config_zero_init(__magic_name__ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(config=__magic_name__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def __A ( self : int ) -> Optional[int]:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = BeitModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def a__ ( ):
SCREAMING_SNAKE_CASE_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self : List[Any] ) -> str:
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def __A ( self : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).pixel_values.to(__magic_name__ )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE_ = torch.ones((1, 196) , dtype=torch.bool ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(pixel_values=__magic_name__ , bool_masked_pos=__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __magic_name__ , atol=1e-2 ) )
@slow
def __A ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE_ = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[0, :3] , __magic_name__ , atol=1e-4 ) )
SCREAMING_SNAKE_CASE_ = 281
self.assertEqual(logits.argmax(-1 ).item() , __magic_name__ )
@slow
def __A ( self : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor([1.6881, -0.2787, 0.5901] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[0, :3] , __magic_name__ , atol=1e-4 ) )
SCREAMING_SNAKE_CASE_ = 2_396
self.assertEqual(logits.argmax(-1 ).item() , __magic_name__ )
@slow
def __A ( self : Tuple ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
SCREAMING_SNAKE_CASE_ = model.to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = BeitImageProcessor(do_resize=__magic_name__ , size=640 , do_center_crop=__magic_name__ )
SCREAMING_SNAKE_CASE_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
SCREAMING_SNAKE_CASE_ = Image.open(ds[0]["file"] )
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=__magic_name__ , )
else:
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=__magic_name__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __magic_name__ , atol=1e-4 ) )
@slow
def __A ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
SCREAMING_SNAKE_CASE_ = model.to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = BeitImageProcessor(do_resize=__magic_name__ , size=640 , do_center_crop=__magic_name__ )
SCREAMING_SNAKE_CASE_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
SCREAMING_SNAKE_CASE_ = Image.open(ds[0]["file"] )
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE_ = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ , target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE_ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __magic_name__ )
| 305
| 1
|
| 305
|
from __future__ import annotations
A : Dict = "#"
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Dict ) -> None:
SCREAMING_SNAKE_CASE_ = {}
def __A ( self : List[Any] , __magic_name__ : str ) -> None:
SCREAMING_SNAKE_CASE_ = self._trie
for char in text:
if char not in trie:
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = trie[char]
SCREAMING_SNAKE_CASE_ = True
def __A ( self : Union[str, Any] , __magic_name__ : str ) -> tuple | list:
SCREAMING_SNAKE_CASE_ = self._trie
for char in prefix:
if char in trie:
SCREAMING_SNAKE_CASE_ = trie[char]
else:
return []
return self._elements(__magic_name__ )
def __A ( self : int , __magic_name__ : dict ) -> tuple:
SCREAMING_SNAKE_CASE_ = []
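        # Recursively collect every suffix below this node; END marks a complete word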
for c, v in d.items():
SCREAMING_SNAKE_CASE_ = [" "] if c == END else [(c + s) for s in self._elements(__magic_name__ )]
result.extend(__magic_name__ )
return tuple(__magic_name__ )
A : Union[str, Any] = Trie()
A : Optional[int] = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = trie.find_word(__UpperCamelCase )
return tuple(string + word for word in suffixes )
def a__ ( ):
print(autocomplete_using_trie("de" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 305
| 1
|
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
A : List[str] = logging.get_logger(__name__)
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = UniSpeechSatForSequenceClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = downstream_dict["projector.weight"]
SCREAMING_SNAKE_CASE_ = downstream_dict["projector.bias"]
SCREAMING_SNAKE_CASE_ = downstream_dict["model.post_net.linear.weight"]
SCREAMING_SNAKE_CASE_ = downstream_dict["model.post_net.linear.bias"]
return model
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = UniSpeechSatForAudioFrameClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = downstream_dict["model.linear.weight"]
SCREAMING_SNAKE_CASE_ = downstream_dict["model.linear.bias"]
return model
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = UniSpeechSatForXVector.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = downstream_dict["connector.weight"]
SCREAMING_SNAKE_CASE_ = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
SCREAMING_SNAKE_CASE_ = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
SCREAMING_SNAKE_CASE_ = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
SCREAMING_SNAKE_CASE_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
SCREAMING_SNAKE_CASE_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
SCREAMING_SNAKE_CASE_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
SCREAMING_SNAKE_CASE_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
SCREAMING_SNAKE_CASE_ = downstream_dict["objective.W"]
return model
@torch.no_grad()
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = torch.load(__UpperCamelCase , map_location="cpu" )
SCREAMING_SNAKE_CASE_ = checkpoint["Downstream"]
SCREAMING_SNAKE_CASE_ = UniSpeechSatConfig.from_pretrained(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(
__UpperCamelCase , return_attention_mask=__UpperCamelCase , do_normalize=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
SCREAMING_SNAKE_CASE_ = convert_classification(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
elif arch.endswith("ForAudioFrameClassification" ):
SCREAMING_SNAKE_CASE_ = convert_diarization(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
elif arch.endswith("ForXVector" ):
SCREAMING_SNAKE_CASE_ = convert_xvector(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
SCREAMING_SNAKE_CASE_ = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(__UpperCamelCase )
hf_model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
A : Any = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
A : Any = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 305
|
from collections import deque
class lowerCamelCase :
"""simple docstring"""
def __init__( self : str , __magic_name__ : str , __magic_name__ : int , __magic_name__ : int ) -> None:
SCREAMING_SNAKE_CASE_ = process_name # process name
SCREAMING_SNAKE_CASE_ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
SCREAMING_SNAKE_CASE_ = arrival_time
SCREAMING_SNAKE_CASE_ = burst_time # remaining burst time
SCREAMING_SNAKE_CASE_ = 0 # total time of the process wait in ready queue
SCREAMING_SNAKE_CASE_ = 0 # time from arrival time to completion time
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Tuple , __magic_name__ : int , __magic_name__ : list[int] , __magic_name__ : deque[Process] , __magic_name__ : int , ) -> None:
# total number of mlfq's queues
SCREAMING_SNAKE_CASE_ = number_of_queues
# time slice of queues that round robin algorithm applied
SCREAMING_SNAKE_CASE_ = time_slices
# unfinished process is in this ready_queue
SCREAMING_SNAKE_CASE_ = queue
# current time
SCREAMING_SNAKE_CASE_ = current_time
# finished process is in this sequence queue
SCREAMING_SNAKE_CASE_ = deque()
def __A ( self : Dict ) -> list[str]:
SCREAMING_SNAKE_CASE_ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def __A ( self : List[str] , __magic_name__ : list[Process] ) -> list[int]:
SCREAMING_SNAKE_CASE_ = []
for i in range(len(__magic_name__ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def __A ( self : List[str] , __magic_name__ : list[Process] ) -> list[int]:
SCREAMING_SNAKE_CASE_ = []
for i in range(len(__magic_name__ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def __A ( self : Tuple , __magic_name__ : list[Process] ) -> list[int]:
SCREAMING_SNAKE_CASE_ = []
for i in range(len(__magic_name__ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def __A ( self : str , __magic_name__ : deque[Process] ) -> list[int]:
return [q.burst_time for q in queue]
def __A ( self : Optional[Any] , __magic_name__ : Process ) -> int:
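        # A process accumulates waiting time while it sits in the ready queue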
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def __A ( self : Optional[Any] , __magic_name__ : deque[Process] ) -> deque[Process]:
SCREAMING_SNAKE_CASE_ = deque() # sequence deque of finished process
while len(__magic_name__ ) != 0:
SCREAMING_SNAKE_CASE_ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(__magic_name__ )
# update current time
self.current_time += cp.burst_time
            # finish the process and set its remaining burst time to 0
SCREAMING_SNAKE_CASE_ = 0
# set the process's turnaround time because it is finished
SCREAMING_SNAKE_CASE_ = self.current_time - cp.arrival_time
# set the completion time
SCREAMING_SNAKE_CASE_ = self.current_time
# add the process to queue that has finished queue
finished.append(__magic_name__ )
self.finish_queue.extend(__magic_name__ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def __A ( self : Any , __magic_name__ : deque[Process] , __magic_name__ : int ) -> tuple[deque[Process], deque[Process]]:
SCREAMING_SNAKE_CASE_ = deque() # sequence deque of terminated process
        # run for just one cycle; unfinished processes go back into the queue
for _ in range(len(__magic_name__ ) ):
SCREAMING_SNAKE_CASE_ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(__magic_name__ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
SCREAMING_SNAKE_CASE_ = self.current_time
                # move the process to the back of the queue because it is not finished
ready_queue.append(__magic_name__ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
SCREAMING_SNAKE_CASE_ = 0
# set the finish time
SCREAMING_SNAKE_CASE_ = self.current_time
# update the process' turnaround time because it is finished
SCREAMING_SNAKE_CASE_ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(__magic_name__ )
self.finish_queue.extend(__magic_name__ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def __A ( self : Any ) -> deque[Process]:
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
A : Dict = Process("P1", 0, 53)
A : str = Process("P2", 0, 17)
A : List[Any] = Process("P3", 0, 68)
A : List[str] = Process("P4", 0, 24)
A : Dict = 3
A : Any = [17, 25]
A : Dict = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
A : Union[str, Any] = Process("P1", 0, 53)
A : Any = Process("P2", 0, 17)
A : Dict = Process("P3", 0, 68)
A : List[str] = Process("P4", 0, 24)
A : Optional[int] = 3
A : int = [17, 25]
A : Union[str, Any] = deque([Pa, Pa, Pa, Pa])
A : Tuple = MLFQ(number_of_queues, time_slices, queue, 0)
A : Tuple = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}")
    # print sequence of finished processes
    print(f"sequence of finished processes: {mlfq.calculate_sequence_of_finish_queue()}")
| 305
| 1
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A : Optional[int] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def __A ( self : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = super().to_dict()
for k, v in d.items():
if isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = v.to_dict()
return d
| 305
|
import torch
def a__ ( ):
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE_ = torch.cuda.device_count()
else:
SCREAMING_SNAKE_CASE_ = 0
print(F'''Successfully ran on {num_gpus} GPUs''' )
if __name__ == "__main__":
main()
| 305
| 1
|
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
A : List[str] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
A : Union[str, Any] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
A : Dict = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase (datasets.Metric ):
"""simple docstring"""
def __A ( self : Any ) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
def __A ( self : List[str] , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any]=None , __magic_name__ : int=None , __magic_name__ : List[Any]=None , __magic_name__ : Optional[int]=None , __magic_name__ : Union[str, Any]="auto" , __magic_name__ : Dict=-1 , __magic_name__ : Optional[int]=0.9 , __magic_name__ : List[Any]=5 , __magic_name__ : List[Any]=500 , __magic_name__ : Tuple="gpt2-large" , __magic_name__ : List[str]=-1 , __magic_name__ : str=1_024 , __magic_name__ : Optional[Any]=25 , __magic_name__ : List[Any]=5 , __magic_name__ : Any=True , __magic_name__ : int=25 , ) -> int:
SCREAMING_SNAKE_CASE_ = compute_mauve(
p_text=__magic_name__ , q_text=__magic_name__ , p_features=__magic_name__ , q_features=__magic_name__ , p_tokens=__magic_name__ , q_tokens=__magic_name__ , num_buckets=__magic_name__ , pca_max_data=__magic_name__ , kmeans_explained_var=__magic_name__ , kmeans_num_redo=__magic_name__ , kmeans_max_iter=__magic_name__ , featurize_model_name=__magic_name__ , device_id=__magic_name__ , max_text_length=__magic_name__ , divergence_curve_discretization_size=__magic_name__ , mauve_scaling_factor=__magic_name__ , verbose=__magic_name__ , seed=__magic_name__ , )
return out
| 305
|
from collections.abc import Generator
from math import sin
def a__ ( __UpperCamelCase ):
if len(__UpperCamelCase ) != 3_2:
raise ValueError("Input must be of length 32" )
SCREAMING_SNAKE_CASE_ = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def a__ ( __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
SCREAMING_SNAKE_CASE_ = format(__UpperCamelCase , "08x" )[-8:]
SCREAMING_SNAKE_CASE_ = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = b""
for char in message:
bit_string += format(__UpperCamelCase , "08b" ).encode("utf-8" )
SCREAMING_SNAKE_CASE_ = format(len(__UpperCamelCase ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__UpperCamelCase ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def a__ ( __UpperCamelCase ):
if len(__UpperCamelCase ) % 5_1_2 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__UpperCamelCase ) , 5_1_2 ):
SCREAMING_SNAKE_CASE_ = bit_string[pos : pos + 5_1_2]
SCREAMING_SNAKE_CASE_ = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def a__ ( __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
SCREAMING_SNAKE_CASE_ = format(__UpperCamelCase , "032b" )
SCREAMING_SNAKE_CASE_ = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__UpperCamelCase , 2 )
def a__ ( __UpperCamelCase , __UpperCamelCase ):
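    # Addition modulo 2**32, matching 32-bit unsigned overflow behaviour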
return (a + b) % 2**3_2
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = preprocess(__UpperCamelCase )
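    # Per-round additive constants: K[i] = floor(2**32 * abs(sin(i + 1)))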
SCREAMING_SNAKE_CASE_ = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
SCREAMING_SNAKE_CASE_ = 0X67452301
SCREAMING_SNAKE_CASE_ = 0Xefcdab89
SCREAMING_SNAKE_CASE_ = 0X98badcfe
SCREAMING_SNAKE_CASE_ = 0X10325476
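    # Per-round left-rotation amounts: four rounds of sixteen steps each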
    SCREAMING_SNAKE_CASE_ = [
        7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2,
        5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0,
        4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3,
        6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = aa
SCREAMING_SNAKE_CASE_ = ba
SCREAMING_SNAKE_CASE_ = ca
SCREAMING_SNAKE_CASE_ = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
SCREAMING_SNAKE_CASE_ = d ^ (b & (c ^ d))
SCREAMING_SNAKE_CASE_ = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
SCREAMING_SNAKE_CASE_ = c ^ (d & (b ^ c))
SCREAMING_SNAKE_CASE_ = (5 * i + 1) % 1_6
elif i <= 4_7:
SCREAMING_SNAKE_CASE_ = b ^ c ^ d
SCREAMING_SNAKE_CASE_ = (3 * i + 5) % 1_6
else:
SCREAMING_SNAKE_CASE_ = c ^ (b | not_aa(__UpperCamelCase ))
SCREAMING_SNAKE_CASE_ = (7 * i) % 1_6
SCREAMING_SNAKE_CASE_ = (f + a + added_consts[i] + block_words[g]) % 2**3_2
SCREAMING_SNAKE_CASE_ = d
SCREAMING_SNAKE_CASE_ = c
SCREAMING_SNAKE_CASE_ = b
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , left_rotate_aa(__UpperCamelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 305
| 1
|
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = CpmAntTokenizer
lowerCamelCase__ = False
def __A ( self : str ) -> Dict:
super().setUp()
SCREAMING_SNAKE_CASE_ = [
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
@tooslow
def __A ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b" )
SCREAMING_SNAKE_CASE_ = "今天天气真好!"
SCREAMING_SNAKE_CASE_ = ["今天", "天气", "真", "好", "!"]
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = "今天天气真好!"
SCREAMING_SNAKE_CASE_ = [tokenizer.bos_token] + tokens
SCREAMING_SNAKE_CASE_ = [6, 9_802, 14_962, 2_082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
| 305
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : int ) -> Any:
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = BlipImageProcessor()
SCREAMING_SNAKE_CASE_ = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
SCREAMING_SNAKE_CASE_ = BlipaProcessor(__magic_name__ , __magic_name__ )
processor.save_pretrained(self.tmpdirname )
def __A ( self : str , **__magic_name__ : int ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer
def __A ( self : Dict , **__magic_name__ : List[Any] ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor
def __A ( self : int ) -> Any:
shutil.rmtree(self.tmpdirname )
def __A ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE_ = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
SCREAMING_SNAKE_CASE_ = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
SCREAMING_SNAKE_CASE_ = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def __A ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = image_processor(__magic_name__ , return_tensors="np" )
SCREAMING_SNAKE_CASE_ = processor(images=__magic_name__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def __A ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ = processor.batch_decode(__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def __A ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 305
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A : str = logging.get_logger(__name__)
A : Optional[int] = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''table-transformer'''
lowerCamelCase__ = ['''past_key_values''']
lowerCamelCase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : List[Any] , __magic_name__ : Optional[Any]=True , __magic_name__ : Dict=None , __magic_name__ : Any=3 , __magic_name__ : List[str]=100 , __magic_name__ : Union[str, Any]=6 , __magic_name__ : Dict=2_048 , __magic_name__ : str=8 , __magic_name__ : int=6 , __magic_name__ : List[Any]=2_048 , __magic_name__ : Optional[int]=8 , __magic_name__ : Optional[int]=0.0 , __magic_name__ : List[Any]=0.0 , __magic_name__ : Optional[Any]=True , __magic_name__ : List[Any]="relu" , __magic_name__ : List[str]=256 , __magic_name__ : List[str]=0.1 , __magic_name__ : int=0.0 , __magic_name__ : Optional[Any]=0.0 , __magic_name__ : Tuple=0.02 , __magic_name__ : str=1.0 , __magic_name__ : int=False , __magic_name__ : Dict="sine" , __magic_name__ : Union[str, Any]="resnet50" , __magic_name__ : Optional[Any]=True , __magic_name__ : str=False , __magic_name__ : List[str]=1 , __magic_name__ : int=5 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Tuple=1 , __magic_name__ : Optional[int]=1 , __magic_name__ : Optional[Any]=5 , __magic_name__ : Optional[int]=2 , __magic_name__ : Union[str, Any]=0.1 , **__magic_name__ : Tuple , ) -> str:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = backbone_config.get("model_type" )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(__magic_name__ )
# set timm attributes to None
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None, None, None
SCREAMING_SNAKE_CASE_ = use_timm_backbone
SCREAMING_SNAKE_CASE_ = backbone_config
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = num_queries
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = encoder_attention_heads
SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = decoder_attention_heads
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = activation_function
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = init_xavier_std
SCREAMING_SNAKE_CASE_ = encoder_layerdrop
SCREAMING_SNAKE_CASE_ = decoder_layerdrop
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = auxiliary_loss
SCREAMING_SNAKE_CASE_ = position_embedding_type
SCREAMING_SNAKE_CASE_ = backbone
SCREAMING_SNAKE_CASE_ = use_pretrained_backbone
SCREAMING_SNAKE_CASE_ = dilation
# Hungarian matcher
SCREAMING_SNAKE_CASE_ = class_cost
SCREAMING_SNAKE_CASE_ = bbox_cost
SCREAMING_SNAKE_CASE_ = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE_ = mask_loss_coefficient
SCREAMING_SNAKE_CASE_ = dice_loss_coefficient
SCREAMING_SNAKE_CASE_ = bbox_loss_coefficient
SCREAMING_SNAKE_CASE_ = giou_loss_coefficient
SCREAMING_SNAKE_CASE_ = eos_coefficient
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def __A ( self : Union[str, Any] ) -> int:
return self.encoder_attention_heads
@property
def __A ( self : Any ) -> int:
return self.d_model
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __A ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def __A ( self : Any ) -> float:
return 1e-5
@property
def __A ( self : int ) -> int:
return 12
| 305
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
A : List[Any] = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = ["DPTFeatureExtractor"]
A : str = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
A : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 305
| 1
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Dict , __magic_name__ : List[Any] , ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = 13
SCREAMING_SNAKE_CASE_ = 7
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = 99
SCREAMING_SNAKE_CASE_ = 32
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 37
SCREAMING_SNAKE_CASE_ = "gelu"
SCREAMING_SNAKE_CASE_ = 0.1
SCREAMING_SNAKE_CASE_ = 0.1
SCREAMING_SNAKE_CASE_ = 512
SCREAMING_SNAKE_CASE_ = 16
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 0.02
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = None
def __A ( self : List[Any] ) -> str:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_ = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : List[str] ) -> Optional[int]:
        (
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
        ) = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __A ( self : Any , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : str , __magic_name__ : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = TFEsmModel(config=__magic_name__ )
SCREAMING_SNAKE_CASE_ = {"input_ids": input_ids, "attention_mask": input_mask}
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
SCREAMING_SNAKE_CASE_ = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : List[str] , __magic_name__ : Dict , __magic_name__ : Tuple , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : Optional[int] , ) -> Any:
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = TFEsmModel(config=__magic_name__ )
SCREAMING_SNAKE_CASE_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
SCREAMING_SNAKE_CASE_ = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , encoder_hidden_states=__magic_name__ )
# Also check the case where encoder outputs are not passed
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , attention_mask=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : Dict , __magic_name__ : List[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = TFEsmForMaskedLM(config=__magic_name__ )
SCREAMING_SNAKE_CASE_ = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : List[str] , __magic_name__ : str , __magic_name__ : str , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : str , __magic_name__ : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = TFEsmForTokenClassification(config=__magic_name__ )
SCREAMING_SNAKE_CASE_ = {"input_ids": input_ids, "attention_mask": input_mask}
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : str ) -> str:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        (
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
            SCREAMING_SNAKE_CASE_,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
def __A ( self : Tuple ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = TFEsmModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def __A ( self : Union[str, Any] ) -> List[Any]:
self.config_tester.run_common_tests()
def __A ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __A ( self : Any ) -> str:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__magic_name__ )
def __A ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def __A ( self : str ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
@slow
def __A ( self : Dict ) -> List[Any]:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = TFEsmModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@unittest.skip("Protein models do not support embedding resizing." )
def __A ( self : Any ) -> Dict:
pass
@unittest.skip("Protein models do not support embedding resizing." )
def __A ( self : str ) -> List[Any]:
pass
def __A ( self : Dict ) -> str:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
SCREAMING_SNAKE_CASE_ = model.get_bias()
assert isinstance(__magic_name__ , __magic_name__ )
for k, v in name.items():
assert isinstance(__magic_name__ , tf.Variable )
else:
SCREAMING_SNAKE_CASE_ = model.get_output_embeddings()
assert x is None
SCREAMING_SNAKE_CASE_ = model.get_bias()
assert name is None
@require_tf
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE_ = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
SCREAMING_SNAKE_CASE_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )[0]
SCREAMING_SNAKE_CASE_ = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , __magic_name__ )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE_ = tf.constant(
[
[
[8.92_1518, -10.58_9814, -6.467_1307],
[-6.396_7156, -13.91_1377, -1.121_1915],
[-7.78_1247, -13.95_1557, -3.74_0592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def __A ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
SCREAMING_SNAKE_CASE_ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )[0]
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE_ = tf.constant(
[
[
[0.1444_3092, 0.5412_5327, 0.324_7739],
[0.3034_0484, 0.0052_6676, 0.3107_7722],
[0.3227_8043, -0.2498_7096, 0.341_4628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 305
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def a__ ( __UpperCamelCase ):
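    # A word's signature is its letters sorted; all anagrams share the same signature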
return "".join(sorted(__UpperCamelCase ) )
def a__ ( __UpperCamelCase ):
return word_by_signature[signature(__UpperCamelCase )]
A : str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
A : int = sorted({word.strip().lower() for word in data.splitlines()})
A : Tuple = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
A : Union[str, Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
| 305
| 1
|
def a__ ( __UpperCamelCase , __UpperCamelCase ):
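    # OR gate: returns 1 if at least one input is 1, else 0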
return int((input_a, input_a).count(1 ) != 0 )
def a__ ( ):
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 305
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : int = logging.get_logger(__name__)
A : str = {
"kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''align_text_model'''
def __init__( self : Optional[Any] , __magic_name__ : Union[str, Any]=30_522 , __magic_name__ : Tuple=768 , __magic_name__ : List[str]=12 , __magic_name__ : Optional[Any]=12 , __magic_name__ : str=3_072 , __magic_name__ : Dict="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : List[str]=512 , __magic_name__ : Any=2 , __magic_name__ : Optional[Any]=0.02 , __magic_name__ : int=1e-12 , __magic_name__ : str=0 , __magic_name__ : Optional[Any]="absolute" , __magic_name__ : Optional[Any]=True , **__magic_name__ : Tuple , ) -> Union[str, Any]:
super().__init__(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = position_embedding_type
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = pad_token_id
@classmethod
def __A ( cls : Any , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : Optional[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
SCREAMING_SNAKE_CASE_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''align_vision_model'''
def __init__( self : List[str] , __magic_name__ : int = 3 , __magic_name__ : int = 600 , __magic_name__ : float = 2.0 , __magic_name__ : float = 3.1 , __magic_name__ : int = 8 , __magic_name__ : List[int] = [3, 3, 5, 3, 5, 5, 3] , __magic_name__ : List[int] = [32, 16, 24, 40, 80, 112, 192] , __magic_name__ : List[int] = [16, 24, 40, 80, 112, 192, 320] , __magic_name__ : List[int] = [] , __magic_name__ : List[int] = [1, 2, 2, 2, 1, 2, 1] , __magic_name__ : List[int] = [1, 2, 2, 3, 3, 4, 1] , __magic_name__ : List[int] = [1, 6, 6, 6, 6, 6, 6] , __magic_name__ : float = 0.25 , __magic_name__ : str = "swish" , __magic_name__ : int = 2_560 , __magic_name__ : str = "mean" , __magic_name__ : float = 0.02 , __magic_name__ : float = 0.001 , __magic_name__ : float = 0.99 , __magic_name__ : float = 0.2 , **__magic_name__ : List[Any] , ) -> Tuple:
super().__init__(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = width_coefficient
SCREAMING_SNAKE_CASE_ = depth_coefficient
SCREAMING_SNAKE_CASE_ = depth_divisor
SCREAMING_SNAKE_CASE_ = kernel_sizes
SCREAMING_SNAKE_CASE_ = in_channels
SCREAMING_SNAKE_CASE_ = out_channels
SCREAMING_SNAKE_CASE_ = depthwise_padding
SCREAMING_SNAKE_CASE_ = strides
SCREAMING_SNAKE_CASE_ = num_block_repeats
SCREAMING_SNAKE_CASE_ = expand_ratios
SCREAMING_SNAKE_CASE_ = squeeze_expansion_ratio
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dim
SCREAMING_SNAKE_CASE_ = pooling_type
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = batch_norm_eps
SCREAMING_SNAKE_CASE_ = batch_norm_momentum
SCREAMING_SNAKE_CASE_ = drop_connect_rate
SCREAMING_SNAKE_CASE_ = sum(__magic_name__ ) * 4
@classmethod
def __A ( cls : List[str] , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : Dict ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
SCREAMING_SNAKE_CASE_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''align'''
lowerCamelCase__ = True
def __init__( self : Optional[Any] , __magic_name__ : Dict=None , __magic_name__ : List[Any]=None , __magic_name__ : str=640 , __magic_name__ : Any=1.0 , __magic_name__ : Dict=0.02 , **__magic_name__ : Union[str, Any] , ) -> int:
super().__init__(**__magic_name__ )
if text_config is None:
SCREAMING_SNAKE_CASE_ = {}
logger.info("text_config is None. Initializing the AlignTextConfig with default values." )
if vision_config is None:
SCREAMING_SNAKE_CASE_ = {}
logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )
SCREAMING_SNAKE_CASE_ = AlignTextConfig(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = AlignVisionConfig(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = projection_dim
SCREAMING_SNAKE_CASE_ = temperature_init_value
SCREAMING_SNAKE_CASE_ = initializer_range
@classmethod
def __A ( cls : List[str] , __magic_name__ : AlignTextConfig , __magic_name__ : AlignVisionConfig , **__magic_name__ : Tuple ) -> Any:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__magic_name__ )
def __A ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.text_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.__class__.model_type
return output
| 305
| 1
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def a__ ( __UpperCamelCase ):
return "".join(sorted(__UpperCamelCase ) )
def a__ ( __UpperCamelCase ):
return word_by_signature[signature(__UpperCamelCase )]
A : str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
A : int = sorted({word.strip().lower() for word in data.splitlines()})
A : Tuple = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
A : Union[str, Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
| 305
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def a__ ( __UpperCamelCase ):
return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
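# Typical invocation (illustrative -- the exact script path depends on your checkout):
#   accelerate launch --num_processes 2 test_ops.py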
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list) -> float:
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2])
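# `beta` is the ordinary-least-squares closed form beta = (X^T X)^-1 X^T y.
# Equivalent, numerically safer sketch (values here are made up for illustration):
#   X = np.array([[1, 2, 5], [1, 3, 7], [1, 4, 9]])
#   y = np.array([10, 14, 18])
#   beta = np.linalg.lstsq(X, y, rcond=None)[0]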
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
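# order=(p, d, q) is the non-seasonal ARIMA spec (AR lags, differencing, MA lags);
# seasonal_order=(P, D, Q, s) repeats that structure at a weekly period s=7.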
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
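# Worked example: for [1, 2, 3, 4, 5], q1 = 2, q3 = 4, iqr = 2,
# so the returned lower limit is 2 - 0.2 = 1.8.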
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
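# e.g. data_safety_checker([0.5, 0.45, 0.55], 0.5) -> True: two of the three
# votes fall within 0.1 of the observed value, so the majority says "safe".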
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
A : Dict = [[1_82_31, 0.0, 1], [2_26_21, 1.0, 2], [1_56_75, 0.0, 3], [2_35_83, 1.0, 4]]
A : Optional[Any] = pd.DataFrame(
data_input, columns=["total_user", "total_even", "days"]
)
A : Union[str, Any] = Normalizer().fit_transform(data_input_df.values)
# split data
A : Optional[int] = normalize_df[:, 2].tolist()
A : List[str] = normalize_df[:, 0].tolist()
A : int = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
A : int = normalize_df[:, [1, 2]].tolist()
A : Tuple = x[: len(x) - 1]
A : str = x[len(x) - 1 :]
# for linear regression & sarimax
A : Tuple = total_date[: len(total_date) - 1]
A : Optional[int] = total_user[: len(total_user) - 1]
A : str = total_match[: len(total_match) - 1]
A : List[Any] = total_date[len(total_date) - 1 :]
A : List[Any] = total_user[len(total_user) - 1 :]
A : Optional[Any] = total_match[len(total_match) - 1 :]
# voting system with forecasting
A : Optional[int] = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
A : str = "" if data_safety_checker(res_vote, tst_user) else "not "
print("Today's data is {not_str}safe.")
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
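# Packing on a toy example (illustrative token ids): with seq_length=4 and two
# documents tokenized as [5, 6] and [7, 8, 9], the concatenated stream is
# [5, 6, <bos>, 7, 8, 9, <bos>]; only the full window [5, 6, <bos>, 7] is yielded
# and the trailing partial window is dropped.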
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
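# Perplexity is exp(mean cross-entropy loss). The OverflowError guard follows the
# upstream example; with tensors, torch.exp typically saturates to inf instead of
# raising, so the except branch is a belt-and-braces fallback.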
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A : List[str] = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
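# With this pattern, `from transformers.models.swin import SwinModel` resolves
# lazily: the torch-backed submodule is only imported on first attribute access.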
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
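    # Each past_key_values entry appended above is a 4-tuple
    # (self-attn key, self-attn value, cross-attn key, cross-attn value), each of
    # shape (batch, num_heads, past_seq_len, hidden_size // num_heads).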
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model1, model2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)

    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
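# e.g. check_models_equal(model, FlaxBertModel.from_pretrained(tmp_dir)) should be
# True for a faithful save/load round-trip.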
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only"""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
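# Hypothetical usage sketch: wrap a projection so only the adapter trains, e.g.
#   module.q_proj = LoRALayer(module.q_proj, rank=16)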
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)


class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        bnb_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=bnb_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)


class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)


@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)


class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
A : Union[str, Any] = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def a__ ( ):
SCREAMING_SNAKE_CASE_ = _ask_options(
"In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
SCREAMING_SNAKE_CASE_ = get_sagemaker_input()
else:
SCREAMING_SNAKE_CASE_ = get_cluster_input()
return config
def a__ ( __UpperCamelCase=None ):
if subparsers is not None:
SCREAMING_SNAKE_CASE_ = subparsers.add_parser("config" , description=__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser("Accelerate config command" , description=__UpperCamelCase )
parser.add_argument(
"--config_file" , default=__UpperCamelCase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=__UpperCamelCase )
return parser
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = get_user_input()
if args.config_file is not None:
SCREAMING_SNAKE_CASE_ = args.config_file
else:
if not os.path.isdir(__UpperCamelCase ):
os.makedirs(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = default_yaml_config_file
if config_file.endswith(".json" ):
config.to_json_file(__UpperCamelCase )
else:
config.to_yaml_file(__UpperCamelCase )
print(F'''accelerate configuration saved at {config_file}''' )
def a__ ( ):
SCREAMING_SNAKE_CASE_ = config_command_parser()
SCREAMING_SNAKE_CASE_ = parser.parse_args()
config_command(__UpperCamelCase )
if __name__ == "__main__":
main()
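# Illustrative CLI use: `accelerate config --config_file my_config.yaml` walks
# through the prompts and writes the answers to my_config.yaml.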
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)
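# e.g. rreplace("decoder.blocks.0.w", ".w", ".weight", 1) -> "decoder.blocks.0.weight"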
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
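# Illustrative use: dataset.prepare_for_task(Summarization(text_column="article",
# summary_column="highlights")) renames those columns to the canonical
# "text"/"summary" pair via the mapping above.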
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )

        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )

        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }

        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
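    # The "mps" branch above exists because seeding a torch.Generator on the MPS
    # device is not supported the same way as on CPU/CUDA, so a global manual_seed
    # is used instead.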
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.0003_9216,
                0.0003_9216,
                0.0003_9216,
                0.0003_9216,
                0.0003_9216,
                0.0003_9216,
                0.0003_9216,
                0.0003_9216,
                0.0003_9216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy")
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
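# A minimal inference sketch for the pipeline exercised above (assumes a local
# PIL `image`; illustrative only, not part of the test suite):
#   pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
#   frames = pipe(image, guidance_scale=3.0, frame_size=64, output_type="np").images[0]
#   # `frames` holds 20 rendered views of the generated 3D asset.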
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """Wraps an existing text config and adds the multimodal fields on top of it."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2_048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
@slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences], text_pair=[sentence[1] for sentence in self.paired_sentences], )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
@slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
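# A minimal end-to-end sketch of in-graph tokenization (assumed checkpoint;
# illustrative only, not part of the test suite):
#   tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
#   model = ModelToSave(tokenizer=tf_tokenizer)
#   pooled = model(tf.constant(["Tokenization happens inside the TF graph."]))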
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__( self , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
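# A minimal usage sketch (assumed checkpoint and a PIL `image`; illustrative only):
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   encoding = processor(image, "How many cats are there?", return_tensors="pt")
#   # `encoding` now contains input_ids/attention_mask plus pixel_values/pixel_mask.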
from __future__ import annotations

END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
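# Expected behaviour of the demo above (a sketch; the END marker surfaces as a
# trailing space on each completion):
#   autocomplete_using_trie("de") -> ('depart ', 'detergent ', 'deer ', 'deal ')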
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2_048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2_048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
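# How the dynamic ONNX axes above map onto tensors (a sketch with assumed shapes):
#   pixel_values -> (batch, num_channels, height, width): all four axes dynamic
#   pixel_mask   -> (batch, ...): only the batch axis is declared dynamic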
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    # Sieve of Eratosthenes: return all primes below max_number.
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    # Count numbers below max_number that are the product of exactly two primes,
    # using a two-pointer sweep over the sorted prime list.
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
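# Sanity check (assumed small bound, not from the original file): the semiprimes
# below 30 are 4, 6, 9, 10, 14, 15, 21, 22, 25, 26, so solution(30) == 10.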
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
lowerCamelCase__ = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1_000 , beta_schedule="linear" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="epsilon" , thresholding=False , )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:16, :16] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        output = pipeline(
            image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np", )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
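# A condensed two-stage sketch mirroring the integration test above (assumed
# prompt and inputs; illustrative only):
#   image_emb, zero_emb = pipe_prior("a hat", generator=generator, num_inference_steps=5).to_tuple()
#   result = pipeline(image=init_image, mask_image=mask, image_embeds=image_emb,
#                     negative_image_embeds=zero_emb, output_type="np").images[0]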
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    # Doolittle LU decomposition (no pivoting): lower has unit diagonal.
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
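# A minimal usage sketch (assumed input matrix; illustrative only):
#   matrix = np.array([[2, -2, 1], [0, 1, 2], [5, 3, 1]])
#   lower, upper = lower_upper_decomposition(matrix)
#   # lower @ upper reproduces `matrix` (up to floating point error).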
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)})
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    doc_stride: int = field(
        default=128, metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."}, )
    max_query_length: int = field(
        default=64, metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        }, )
    max_answer_length: int = field(
        default=30, metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    n_best_size: int = field(
        default=20, metadata={"help": "The total number of n-best predictions to generate."})
    lang_id: int = field(
        default=0, metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        }, )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self , args: SquadDataTrainingArguments , tokenizer: PreTrainedTokenizer , limit_length: Optional[int] = None , mode: Union[str, Split] = Split.train , is_language_sensitive: Optional[bool] = False , cache_dir: Optional[str] = None , dataset_format: Optional[str] = "pt" , ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}", )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start)
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run")
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples}, cached_features_file, )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
        if self.args.version_2_with_negative:
            inputs.update({"is_impossible": is_impossible})
        if self.is_language_sensitive:
            inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
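# A minimal usage sketch (assumed paths and tokenizer choice; illustrative only):
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   dataset = SquadDataset(args, tokenizer, mode="train")
#   batch = dataset[0]  # dict of input_ids / attention_mask / position tensors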
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    # Heron's formula
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A : Dict = logging.get_logger(__name__)
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = ['''pixel_values''']
def __init__( self : Union[str, Any] , __magic_name__ : bool = True , __magic_name__ : Optional[Dict[str, int]] = None , __magic_name__ : PILImageResampling = PILImageResampling.BILINEAR , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : bool = True , __magic_name__ : Union[int, float] = 1 / 255 , __magic_name__ : bool = True , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , **__magic_name__ : List[str] , ) -> None:
super().__init__(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = size if size is not None else {"shortest_edge": 256}
SCREAMING_SNAKE_CASE_ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE_ = get_size_dict(__magic_name__ )
SCREAMING_SNAKE_CASE_ = do_resize
SCREAMING_SNAKE_CASE_ = size
SCREAMING_SNAKE_CASE_ = resample
SCREAMING_SNAKE_CASE_ = do_center_crop
SCREAMING_SNAKE_CASE_ = crop_size
SCREAMING_SNAKE_CASE_ = do_rescale
SCREAMING_SNAKE_CASE_ = rescale_factor
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : List[Any] , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : str , ) -> np.ndarray:
SCREAMING_SNAKE_CASE_ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE_ = get_resize_output_image_size(__magic_name__ , size=size["shortest_edge"] , default_to_square=__magic_name__ )
return resize(__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def __A ( self : List[Any] , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Any , ) -> np.ndarray:
SCREAMING_SNAKE_CASE_ = get_size_dict(__magic_name__ )
return center_crop(__magic_name__ , size=(size["height"], size["width"]) , data_format=__magic_name__ , **__magic_name__ )
def __A ( self : Union[str, Any] , __magic_name__ : np.ndarray , __magic_name__ : float , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Union[str, Any] ) -> np.ndarray:
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def __A ( self : Union[str, Any] , __magic_name__ : np.ndarray , __magic_name__ : Union[float, List[float]] , __magic_name__ : Union[float, List[float]] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : str , ) -> np.ndarray:
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def __A ( self : str , __magic_name__ : ImageInput , __magic_name__ : Optional[bool] = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = None , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[float] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[str, TensorType]] = None , __magic_name__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__magic_name__ : Dict , ) -> Any:
SCREAMING_SNAKE_CASE_ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
SCREAMING_SNAKE_CASE_ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ = get_size_dict(__magic_name__ )
SCREAMING_SNAKE_CASE_ = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ = [self.resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ = [self.center_crop(image=__magic_name__ , size=__magic_name__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
SCREAMING_SNAKE_CASE_ = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
SCREAMING_SNAKE_CASE_ = {"pixel_values": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=50_265 , max_position_embeddings=512 , encoder_layers=8 , encoder_ffn_dim=2_048 , encoder_attention_heads=16 , decoder_layers=8 , decoder_ffn_dim=2_048 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=1 , scale_embedding=False , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ])
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ))
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
def __A ( self : Optional[Any] , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_causal_lm(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
else:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
return common_inputs
def __A ( self : Optional[Any] , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : List[str] ) -> List[str]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = super()._flatten_past_key_values_(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
else:
SCREAMING_SNAKE_CASE_ = super(__magic_name__ , self )._flatten_past_key_values_(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
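# A minimal sketch of the past_key_values layout assembled above, with
# hypothetical sizes: each layer caches (key, value) tensors of shape
# (batch, num_attention_heads, past_sequence_length, hidden_size // num_attention_heads).
import torch
_shape = (2, 4, 10, 64 // 4)  # batch, heads, past length, head dim -- hypothetical
_past = [(torch.zeros(_shape), torch.zeros(_shape)) for _ in range(6)]
assert _past[0][0].shape == (2, 4, 10, 16)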
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
SCREAMING_SNAKE_CASE_ = flax_key_tuple[:-1] + ("weight",)
SCREAMING_SNAKE_CASE_ = torch.permute(__UpperCamelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__UpperCamelCase ):
# linear layer
SCREAMING_SNAKE_CASE_ = flax_key_tuple[:-1] + ("weight",)
SCREAMING_SNAKE_CASE_ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
SCREAMING_SNAKE_CASE_ = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
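# A minimal sketch of the expert-layer branch above, with hypothetical sizes:
# a 3D Flax "kernel" of shape (num_experts, d_in, d_out) is renamed to "weight"
# and permuted to (num_experts, d_out, d_in) to match PyTorch's layout.
_toy_key = ("mlp", "wi", "kernel")  # hypothetical key tuple
_toy_kernel = torch.zeros(8, 512, 2048)
assert _toy_key[:-1] + ("weight",) == ("mlp", "wi", "weight")
assert torch.permute(_toy_kernel, (0, 2, 1)).shape == (8, 2048, 512)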
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if "metadata" in layer:
SCREAMING_SNAKE_CASE_ = layer.split("metadata" )
SCREAMING_SNAKE_CASE_ = "".join(split_layer[0] )[:-1]
SCREAMING_SNAKE_CASE_ = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
SCREAMING_SNAKE_CASE_ = layer.split("kvstore" )
SCREAMING_SNAKE_CASE_ = "".join(split_layer[0] )[:-1]
SCREAMING_SNAKE_CASE_ = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
SCREAMING_SNAKE_CASE_ = layer.split("/" )
SCREAMING_SNAKE_CASE_ = "/".join(split_layer[:-1] )
SCREAMING_SNAKE_CASE_ = (split_layer[-1],)
if "kvstore/path" in layer:
SCREAMING_SNAKE_CASE_ = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
SCREAMING_SNAKE_CASE_ = "file"
else:
SCREAMING_SNAKE_CASE_ = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def a__ ( __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = rename_keys(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = {}
for k, v in current_block.items():
SCREAMING_SNAKE_CASE_ = v
SCREAMING_SNAKE_CASE_ = new_current_block
torch.save(__UpperCamelCase , __UpperCamelCase )
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = WEIGHTS_NAME ):
SCREAMING_SNAKE_CASE_ = convert_file_size_to_int(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
SCREAMING_SNAKE_CASE_ = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
SCREAMING_SNAKE_CASE_ = flatten_dict(__UpperCamelCase , sep="/" )
SCREAMING_SNAKE_CASE_ = {}
for layer in checkpoint_info.keys():
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = get_key_and_tensorstore_dict(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if curr_real_layer_name in all_layers:
SCREAMING_SNAKE_CASE_ = content
else:
SCREAMING_SNAKE_CASE_ = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
SCREAMING_SNAKE_CASE_ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
SCREAMING_SNAKE_CASE_ = torch.tensor(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = rename_base_flax_keys(tuple(key.split("/" ) ) , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = "/".join(__UpperCamelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
SCREAMING_SNAKE_CASE_ = os.path.join(
__UpperCamelCase , weights_name.replace(".bin" , F'''-{len(__UpperCamelCase )+1:05d}-of-???.bin''' ) )
rename_and_save_block(__UpperCamelCase , __UpperCamelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = raw_weights.to(getattr(__UpperCamelCase , __UpperCamelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCamelCase , weights_name.replace(".bin" , F'''-{len(__UpperCamelCase )+1:05d}-of-???.bin''' ) )
rename_and_save_block(__UpperCamelCase , __UpperCamelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__UpperCamelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = {}
for idx, shard in enumerate(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = weights_name.replace(
".bin" , F'''-{idx+1:05d}-of-{len(__UpperCamelCase ):05d}.bin''' ) # len(sharded_state_dicts):05d}
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCamelCase , weights_name.replace(".bin" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__UpperCamelCase , os.path.join(__UpperCamelCase , __UpperCamelCase ) )
SCREAMING_SNAKE_CASE_ = shard
for key in shard:
SCREAMING_SNAKE_CASE_ = shard_file
# Add the metadata
SCREAMING_SNAKE_CASE_ = {"total_size": total_size}
SCREAMING_SNAKE_CASE_ = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(__UpperCamelCase , __UpperCamelCase ) , "w" , encoding="utf-8" ) as f:
SCREAMING_SNAKE_CASE_ = json.dumps(__UpperCamelCase , indent=2 , sort_keys=__UpperCamelCase ) + "\n"
f.write(__UpperCamelCase )
return metadata, index
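# For reference, the index returned above follows the sharded-checkpoint layout
# built a few lines earlier (entry names here are hypothetical):
# {
#     "metadata": {"total_size": 123456},
#     "weight_map": {"encoder.block.0.layer.0.weight": "pytorch_model-00001-of-00002.bin"}
# }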
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
A : Optional[Any] = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def a__ ( ):
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
SCREAMING_SNAKE_CASE_ = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
SCREAMING_SNAKE_CASE_ = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
SCREAMING_SNAKE_CASE_ = TaTokenizer.from_pretrained("t5-small" )
SCREAMING_SNAKE_CASE_ = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
SCREAMING_SNAKE_CASE_ = tokenizer(__UpperCamelCase , return_tensors="pt" ).input_ids
SCREAMING_SNAKE_CASE_ = model.generate(__UpperCamelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowerCamelCase :
"""simple docstring"""
def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : int=100 , __magic_name__ : Optional[Any]=13 , __magic_name__ : Dict=30 , __magic_name__ : Tuple=2 , __magic_name__ : str=3 , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : Union[str, Any]=32 , __magic_name__ : Optional[int]=4 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Any="gelu" , __magic_name__ : int=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Optional[int]=10 , __magic_name__ : Tuple=0.02 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=None , __magic_name__ : Tuple=[0, 1, 2, 3] , ) -> List[str]:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = 100
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = out_indices
SCREAMING_SNAKE_CASE_ = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_ = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_ = num_patches + 1
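# Worked example with the defaults above: image_size=30 and patch_size=2 give
# (30 // 2) ** 2 = 225 patches, hence seq_length = 226.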
def __A ( self : Any ) -> int:
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def __A ( self : Dict ) -> Optional[int]:
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __A ( self : Optional[int] , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = BeitModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : str ) -> int:
SCREAMING_SNAKE_CASE_ = BeitForMaskedImageModeling(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __A ( self : Dict , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_ = BeitForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = BeitForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self : Tuple , __magic_name__ : Any , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : int ) -> int:
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __A ( self : str ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __A ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = BeitModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def __A ( self : Dict ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def __A ( self : List[str] ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __A ( self : str ) -> List[str]:
pass
def __A ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def __A ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def __A ( self : Tuple ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __A ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def __A ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
def __A ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__magic_name__ )
def __A ( self : int ) -> Optional[int]:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__magic_name__ ), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ ).loss
loss.backward()
def __A ( self : Any ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__magic_name__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.gradient_checkpointing_enable()
model.to(__magic_name__ )
model.train()
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ ).loss
loss.backward()
def __A ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = _config_zero_init(__magic_name__ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(config=__magic_name__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def __A ( self : int ) -> Optional[int]:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = BeitModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def a__ ( ):
SCREAMING_SNAKE_CASE_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self : List[Any] ) -> str:
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def __A ( self : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).pixel_values.to(__magic_name__ )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE_ = torch.ones((1, 196) , dtype=torch.bool ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(pixel_values=__magic_name__ , bool_masked_pos=__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __magic_name__ , atol=1e-2 ) )
@slow
def __A ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE_ = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[0, :3] , __magic_name__ , atol=1e-4 ) )
SCREAMING_SNAKE_CASE_ = 281
self.assertEqual(logits.argmax(-1 ).item() , __magic_name__ )
@slow
def __A ( self : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor([1.6881, -0.2787, 0.5901] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[0, :3] , __magic_name__ , atol=1e-4 ) )
SCREAMING_SNAKE_CASE_ = 2_396
self.assertEqual(logits.argmax(-1 ).item() , __magic_name__ )
@slow
def __A ( self : Tuple ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
SCREAMING_SNAKE_CASE_ = model.to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = BeitImageProcessor(do_resize=__magic_name__ , size=640 , do_center_crop=__magic_name__ )
SCREAMING_SNAKE_CASE_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
SCREAMING_SNAKE_CASE_ = Image.open(ds[0]["file"] )
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=__magic_name__ , )
else:
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=__magic_name__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __magic_name__ , atol=1e-4 ) )
@slow
def __A ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
SCREAMING_SNAKE_CASE_ = model.to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = BeitImageProcessor(do_resize=__magic_name__ , size=640 , do_center_crop=__magic_name__ )
SCREAMING_SNAKE_CASE_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
SCREAMING_SNAKE_CASE_ = Image.open(ds[0]["file"] )
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE_ = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ , target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE_ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __magic_name__ )
from collections.abc import Callable
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Any , __magic_name__ : Callable | None = None ) -> None:
# Stores actual heap items.
SCREAMING_SNAKE_CASE_ = []
# Stores indexes of each item for supporting updates and deletion.
SCREAMING_SNAKE_CASE_ = {}
# Stores current size of heap.
SCREAMING_SNAKE_CASE_ = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
SCREAMING_SNAKE_CASE_ = key or (lambda x : x)
def __A ( self : Any , __magic_name__ : int ) -> int | None:
return int((i - 1) / 2 ) if i > 0 else None
def __A ( self : List[Any] , __magic_name__ : int ) -> int | None:
SCREAMING_SNAKE_CASE_ = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __A ( self : Tuple , __magic_name__ : int ) -> int | None:
SCREAMING_SNAKE_CASE_ = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __A ( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : int ) -> None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.arr[j], self.arr[i]
def __A ( self : Optional[Any] , __magic_name__ : int , __magic_name__ : int ) -> bool:
return self.arr[i][1] < self.arr[j][1]
def __A ( self : int , __magic_name__ : int ) -> int:
SCREAMING_SNAKE_CASE_ = self._left(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self._right(__magic_name__ )
SCREAMING_SNAKE_CASE_ = i
if left is not None and not self._cmp(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = left
if right is not None and not self._cmp(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = right
return valid_parent
def __A ( self : Any , __magic_name__ : int ) -> None:
SCREAMING_SNAKE_CASE_ = self._parent(__magic_name__ )
while parent is not None and not self._cmp(__magic_name__ , __magic_name__ ):
self._swap(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = parent, self._parent(__magic_name__ )
def __A ( self : List[Any] , __magic_name__ : int ) -> None:
SCREAMING_SNAKE_CASE_ = self._get_valid_parent(__magic_name__ )
while valid_parent != index:
self._swap(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = valid_parent, self._get_valid_parent(__magic_name__ )
def __A ( self : Optional[Any] , __magic_name__ : int , __magic_name__ : int ) -> None:
if item not in self.pos_map:
return
SCREAMING_SNAKE_CASE_ = self.pos_map[item]
SCREAMING_SNAKE_CASE_ = [item, self.key(__magic_name__ )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(__magic_name__ )
self._heapify_down(__magic_name__ )
def __A ( self : Tuple , __magic_name__ : int ) -> None:
if item not in self.pos_map:
return
SCREAMING_SNAKE_CASE_ = self.pos_map[item]
del self.pos_map[item]
SCREAMING_SNAKE_CASE_ = self.arr[self.size - 1]
SCREAMING_SNAKE_CASE_ = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(__magic_name__ )
self._heapify_down(__magic_name__ )
def __A ( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : int ) -> None:
SCREAMING_SNAKE_CASE_ = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(__magic_name__ )] )
else:
SCREAMING_SNAKE_CASE_ = [item, self.key(__magic_name__ )]
SCREAMING_SNAKE_CASE_ = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __A ( self : Optional[int] ) -> tuple | None:
return self.arr[0] if self.size else None
def __A ( self : Union[str, Any] ) -> tuple | None:
SCREAMING_SNAKE_CASE_ = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def a__ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
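# A minimal usage sketch, assuming the heap class above is imported as Heap and
# that its methods keep their upstream names insert_item, get_top and
# extract_top (both assumptions, since the names are mangled in this listing):
# heap = Heap()                  # default key: identity, so a max-heap on values
# heap.insert_item(5, 34)
# heap.insert_item(6, 31)
# heap.insert_item(7, 37)
# heap.get_top()                 # -> [7, 37]
# heap.extract_top()             # -> [7, 37]; [5, 34] becomes the new top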
from __future__ import annotations
A : Dict = "#"
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Dict ) -> None:
SCREAMING_SNAKE_CASE_ = {}
def __A ( self : List[Any] , __magic_name__ : str ) -> None:
SCREAMING_SNAKE_CASE_ = self._trie
for char in text:
if char not in trie:
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = trie[char]
SCREAMING_SNAKE_CASE_ = True
def __A ( self : Union[str, Any] , __magic_name__ : str ) -> tuple | list:
SCREAMING_SNAKE_CASE_ = self._trie
for char in prefix:
if char in trie:
SCREAMING_SNAKE_CASE_ = trie[char]
else:
return []
return self._elements(__magic_name__ )
def __A ( self : int , __magic_name__ : dict ) -> tuple:
SCREAMING_SNAKE_CASE_ = []
for c, v in d.items():
SCREAMING_SNAKE_CASE_ = [" "] if c == END else [(c + s) for s in self._elements(__magic_name__ )]
result.extend(__magic_name__ )
return tuple(__magic_name__ )
A : Union[str, Any] = Trie()
A : Optional[int] = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = trie.find_word(__UpperCamelCase )
return tuple(string + word for word in suffixes )
def a__ ( ):
print(autocomplete_using_trie("de" ) )
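# With the word list above this prints something like
# ('depart ', 'detergent ', 'deer ', 'deal '): each completion keeps a trailing
# space because the END sentinel contributes " " in _elements.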
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
A : int = logging.get_logger("transformers.models.speecht5")
A : Any = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
A : List[str] = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
A : Dict = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
A : int = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
A : Tuple = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
A : Optional[Any] = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
A : Optional[Any] = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
A : List[Any] = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
A : Optional[Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
A : Dict = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
A : str = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
A : str = []
A : Any = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
A : str = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
A : int = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
A : List[str] = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
for attribute in key.split("." ):
SCREAMING_SNAKE_CASE_ = getattr(__UpperCamelCase , __UpperCamelCase )
if weight_type is not None:
SCREAMING_SNAKE_CASE_ = getattr(__UpperCamelCase , __UpperCamelCase ).shape
else:
SCREAMING_SNAKE_CASE_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
SCREAMING_SNAKE_CASE_ = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE_ = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE_ = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE_ = value
elif weight_type == "running_mean":
SCREAMING_SNAKE_CASE_ = value
elif weight_type == "running_var":
SCREAMING_SNAKE_CASE_ = value
elif weight_type == "num_batches_tracked":
SCREAMING_SNAKE_CASE_ = value
else:
SCREAMING_SNAKE_CASE_ = value
logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def a__ ( __UpperCamelCase , __UpperCamelCase ):
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
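# A small self-contained sketch of the ".*." matching above (the names are
# hypothetical): a key matches when both its prefix and suffix occur in the name.
_name = "encoder.layers.3.norm_k.weight"
_prefix, _suffix = "encoder.layers.*.norm_k.weight".split(".*.")
assert _prefix in _name and _suffix in _name  # this name would be ignored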
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = []
if task == "s2t":
SCREAMING_SNAKE_CASE_ = hf_model.speechta.encoder.prenet.feature_encoder
SCREAMING_SNAKE_CASE_ = MAPPING_S2T
SCREAMING_SNAKE_CASE_ = IGNORE_KEYS_S2T
elif task == "t2s":
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = MAPPING_T2S
SCREAMING_SNAKE_CASE_ = IGNORE_KEYS_T2S
elif task == "s2s":
SCREAMING_SNAKE_CASE_ = hf_model.speechta.encoder.prenet.feature_encoder
SCREAMING_SNAKE_CASE_ = MAPPING_S2S
SCREAMING_SNAKE_CASE_ = IGNORE_KEYS_S2S
else:
raise ValueError(F'''Unsupported task: {task}''' )
for name, value in fairseq_dict.items():
if should_ignore(__UpperCamelCase , __UpperCamelCase ):
logger.info(F'''{name} was ignored''' )
continue
SCREAMING_SNAKE_CASE_ = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , hf_model.config.feat_extract_norm == "group" , )
SCREAMING_SNAKE_CASE_ = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = key.split(".*." )
if prefix in name and suffix in name:
SCREAMING_SNAKE_CASE_ = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
SCREAMING_SNAKE_CASE_ = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE_ = name.split(__UpperCamelCase )[0].split("." )[-2]
SCREAMING_SNAKE_CASE_ = mapped_key.replace("*" , __UpperCamelCase )
if "weight_g" in name:
SCREAMING_SNAKE_CASE_ = "weight_g"
elif "weight_v" in name:
SCREAMING_SNAKE_CASE_ = "weight_v"
elif "bias" in name:
SCREAMING_SNAKE_CASE_ = "bias"
elif "weight" in name:
SCREAMING_SNAKE_CASE_ = "weight"
elif "running_mean" in name:
SCREAMING_SNAKE_CASE_ = "running_mean"
elif "running_var" in name:
SCREAMING_SNAKE_CASE_ = "running_var"
elif "num_batches_tracked" in name:
SCREAMING_SNAKE_CASE_ = "num_batches_tracked"
else:
SCREAMING_SNAKE_CASE_ = None
set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = full_name.split("conv_layers." )[-1]
SCREAMING_SNAKE_CASE_ = name.split("." )
SCREAMING_SNAKE_CASE_ = int(items[0] )
SCREAMING_SNAKE_CASE_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
SCREAMING_SNAKE_CASE_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
SCREAMING_SNAKE_CASE_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
SCREAMING_SNAKE_CASE_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
SCREAMING_SNAKE_CASE_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__UpperCamelCase )
@torch.no_grad()
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , ):
if config_path is not None:
SCREAMING_SNAKE_CASE_ = SpeechTaConfig.from_pretrained(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE_ = SpeechTaConfig()
if task == "s2t":
SCREAMING_SNAKE_CASE_ = config.max_text_positions
SCREAMING_SNAKE_CASE_ = SpeechTaForSpeechToText(__UpperCamelCase )
elif task == "t2s":
SCREAMING_SNAKE_CASE_ = 1_8_7_6
SCREAMING_SNAKE_CASE_ = 6_0_0
SCREAMING_SNAKE_CASE_ = config.max_speech_positions
SCREAMING_SNAKE_CASE_ = SpeechTaForTextToSpeech(__UpperCamelCase )
elif task == "s2s":
SCREAMING_SNAKE_CASE_ = 1_8_7_6
SCREAMING_SNAKE_CASE_ = config.max_speech_positions
SCREAMING_SNAKE_CASE_ = SpeechTaForSpeechToSpeech(__UpperCamelCase )
else:
raise ValueError(F'''Unknown task name: {task}''' )
if vocab_path:
SCREAMING_SNAKE_CASE_ = SpeechTaTokenizer(__UpperCamelCase , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ = AddedToken("<mask>" , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE_ = SpeechTaProcessor(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = torch.load(__UpperCamelCase )
recursively_load_weights(fairseq_checkpoint["model"] , __UpperCamelCase , __UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
if repo_id:
print("Pushing to the hub..." )
processor.push_to_hub(__UpperCamelCase )
model.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
A : int = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
from collections import deque
class lowerCamelCase :
"""simple docstring"""
def __init__( self : str , __magic_name__ : str , __magic_name__ : int , __magic_name__ : int ) -> None:
SCREAMING_SNAKE_CASE_ = process_name # process name
SCREAMING_SNAKE_CASE_ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
SCREAMING_SNAKE_CASE_ = arrival_time
SCREAMING_SNAKE_CASE_ = burst_time # remaining burst time
SCREAMING_SNAKE_CASE_ = 0 # total time of the process wait in ready queue
SCREAMING_SNAKE_CASE_ = 0 # time from arrival time to completion time
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Tuple , __magic_name__ : int , __magic_name__ : list[int] , __magic_name__ : deque[Process] , __magic_name__ : int , ) -> None:
# total number of mlfq's queues
SCREAMING_SNAKE_CASE_ = number_of_queues
# time slice of queues that round robin algorithm applied
SCREAMING_SNAKE_CASE_ = time_slices
# unfinished process is in this ready_queue
SCREAMING_SNAKE_CASE_ = queue
# current time
SCREAMING_SNAKE_CASE_ = current_time
# finished process is in this sequence queue
SCREAMING_SNAKE_CASE_ = deque()
def __A ( self : Dict ) -> list[str]:
SCREAMING_SNAKE_CASE_ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def __A ( self : List[str] , __magic_name__ : list[Process] ) -> list[int]:
SCREAMING_SNAKE_CASE_ = []
for i in range(len(__magic_name__ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def __A ( self : List[str] , __magic_name__ : list[Process] ) -> list[int]:
SCREAMING_SNAKE_CASE_ = []
for i in range(len(__magic_name__ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def __A ( self : Tuple , __magic_name__ : list[Process] ) -> list[int]:
SCREAMING_SNAKE_CASE_ = []
for i in range(len(__magic_name__ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def __A ( self : str , __magic_name__ : deque[Process] ) -> list[int]:
return [q.burst_time for q in queue]
def __A ( self : Optional[Any] , __magic_name__ : Process ) -> int:
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def __A ( self : Optional[Any] , __magic_name__ : deque[Process] ) -> deque[Process]:
SCREAMING_SNAKE_CASE_ = deque() # sequence deque of finished process
while len(__magic_name__ ) != 0:
SCREAMING_SNAKE_CASE_ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(__magic_name__ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
SCREAMING_SNAKE_CASE_ = 0
# set the process's turnaround time because it is finished
SCREAMING_SNAKE_CASE_ = self.current_time - cp.arrival_time
# set the completion time
SCREAMING_SNAKE_CASE_ = self.current_time
# add the process to queue that has finished queue
finished.append(__magic_name__ )
self.finish_queue.extend(__magic_name__ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def __A ( self : Any , __magic_name__ : deque[Process] , __magic_name__ : int ) -> tuple[deque[Process], deque[Process]]:
SCREAMING_SNAKE_CASE_ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(__magic_name__ ) ):
SCREAMING_SNAKE_CASE_ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(__magic_name__ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
SCREAMING_SNAKE_CASE_ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(__magic_name__ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
SCREAMING_SNAKE_CASE_ = 0
# set the finish time
SCREAMING_SNAKE_CASE_ = self.current_time
# update the process' turnaround time because it is finished
SCREAMING_SNAKE_CASE_ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(__magic_name__ )
self.finish_queue.extend(__magic_name__ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def __A ( self : Any ) -> deque[Process]:
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
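# Worked example, using the demo values below: with time slices [17, 25], a
# process with burst time 53 runs 17 time units in the first queue and 25 in the
# second (53 - 17 - 25 = 11 left), then finishes its last 11 under FCFS.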
if __name__ == "__main__":
import doctest
A : Dict = Process("P1", 0, 53)
A : str = Process("P2", 0, 17)
A : List[Any] = Process("P3", 0, 68)
A : List[str] = Process("P4", 0, 24)
A : Dict = 3
A : Any = [17, 25]
A : Dict = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
A : Union[str, Any] = Process("P1", 0, 53)
A : Any = Process("P2", 0, 17)
A : Dict = Process("P3", 0, 68)
A : List[str] = Process("P4", 0, 24)
A : Optional[int] = 3
A : int = [17, 25]
A : Union[str, Any] = deque([Pa, Pa, Pa, Pa])
A : Tuple = MLFQ(number_of_queues, time_slices, queue, 0)
A : Tuple = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print sequence of finished processes
print(
f"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
)
from __future__ import annotations
A : str = 1.6_021E-19 # units = C
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative" )
elif mobility < 0:
raise ValueError("mobility cannot be negative" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
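# Worked example (hypothetical numbers): with conductivity == 0 the function
# solves sigma = q * n * mu for the missing value, e.g. electron_conc = 1e20
# and mobility = 0.01 give 1.6021e-19 * 1e20 * 0.01 ~= 0.16021.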
if __name__ == "__main__":
import doctest
doctest.testmod()
import torch
def a__ ( ):
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE_ = torch.cuda.device_count()
else:
SCREAMING_SNAKE_CASE_ = 0
print(F'''Successfully ran on {num_gpus} GPUs''' )
if __name__ == "__main__":
a__()
import argparse
from collections import defaultdict
import yaml
A : Tuple = "docs/source/en/_toctree.yml"
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = defaultdict(__UpperCamelCase )
for doc in model_doc:
counts[doc["local"]] += 1
SCREAMING_SNAKE_CASE_ = [key for key, value in counts.items() if value > 1]
SCREAMING_SNAKE_CASE_ = []
for duplicate_key in duplicates:
SCREAMING_SNAKE_CASE_ = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
if len(__UpperCamelCase ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
# Sort
return sorted(__UpperCamelCase , key=lambda s : s["title"].lower() )
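# A toy sketch of the cleanup above (hypothetical entries): duplicated "local"
# keys with a single title collapse to one entry and the result is title-sorted:
# [{"local": "bert", "title": "BERT"}, {"local": "bert", "title": "BERT"},
#  {"local": "albert", "title": "ALBERT"}]
# -> [{"local": "albert", "title": "ALBERT"}, {"local": "bert", "title": "BERT"}]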
def a__ ( __UpperCamelCase=False ):
with open(__UpperCamelCase , encoding="utf-8" ) as f:
SCREAMING_SNAKE_CASE_ = yaml.safe_load(f.read() )
# Get to the API doc
SCREAMING_SNAKE_CASE_ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
SCREAMING_SNAKE_CASE_ = content[api_idx]["sections"]
# Then to the model doc
SCREAMING_SNAKE_CASE_ = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
SCREAMING_SNAKE_CASE_ = api_doc[model_idx]["sections"]
SCREAMING_SNAKE_CASE_ = [(idx, section) for idx, section in enumerate(__UpperCamelCase ) if "sections" in section]
SCREAMING_SNAKE_CASE_ = False
for idx, modality_doc in modalities_docs:
SCREAMING_SNAKE_CASE_ = modality_doc["sections"]
SCREAMING_SNAKE_CASE_ = clean_model_doc_toc(__UpperCamelCase )
if old_modality_doc != new_modality_doc:
SCREAMING_SNAKE_CASE_ = True
if overwrite:
SCREAMING_SNAKE_CASE_ = new_modality_doc
if diff:
if overwrite:
SCREAMING_SNAKE_CASE_ = model_doc
SCREAMING_SNAKE_CASE_ = api_doc
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
A : Optional[int] = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
from collections.abc import Generator
from math import sin
def a__ ( __UpperCamelCase ):
if len(__UpperCamelCase ) != 3_2:
raise ValueError("Input must be of length 32" )
SCREAMING_SNAKE_CASE_ = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
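# Worked example: the 32-char block b"aaaaaaaabbbbbbbbccccccccdddddddd" comes
# back as b"ddddddddccccccccbbbbbbbbaaaaaaaa" (the four 8-char chunks reversed).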
def a__ ( __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
SCREAMING_SNAKE_CASE_ = format(__UpperCamelCase , "08x" )[-8:]
SCREAMING_SNAKE_CASE_ = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = b""
for char in message:
bit_string += format(__UpperCamelCase , "08b" ).encode("utf-8" )
SCREAMING_SNAKE_CASE_ = format(len(__UpperCamelCase ) , "064b" ).encode("utf-8" )
    # Append a '1', then pad with '0's until the length is 448 (mod 512); the 64-bit length below completes the 512-char multiple
bit_string += b"1"
while len(__UpperCamelCase ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def a__ ( __UpperCamelCase ):
if len(__UpperCamelCase ) % 5_1_2 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__UpperCamelCase ) , 5_1_2 ):
SCREAMING_SNAKE_CASE_ = bit_string[pos : pos + 5_1_2]
SCREAMING_SNAKE_CASE_ = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
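# Bitwise complement of a 32-bit value.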
def a__ ( __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
SCREAMING_SNAKE_CASE_ = format(__UpperCamelCase , "032b" )
SCREAMING_SNAKE_CASE_ = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__UpperCamelCase , 2 )
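# Addition modulo 2**32.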
def a__ ( __UpperCamelCase , __UpperCamelCase ):
return (a + b) % 2**3_2
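# Rotate a 32-bit value left by `shift` bits.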
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
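# Compute the MD5 digest of the message and return it as a 32-char hex bytestring.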
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = preprocess(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
SCREAMING_SNAKE_CASE_ = 0X67452301
SCREAMING_SNAKE_CASE_ = 0Xefcdab89
SCREAMING_SNAKE_CASE_ = 0X98badcfe
SCREAMING_SNAKE_CASE_ = 0X10325476
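    # Per-round left-rotation amounts: four rounds of 16 steps each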
    SCREAMING_SNAKE_CASE_ = [
        7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2,
        5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0,
        4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3,
        6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = aa
SCREAMING_SNAKE_CASE_ = ba
SCREAMING_SNAKE_CASE_ = ca
SCREAMING_SNAKE_CASE_ = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
SCREAMING_SNAKE_CASE_ = d ^ (b & (c ^ d))
SCREAMING_SNAKE_CASE_ = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
SCREAMING_SNAKE_CASE_ = c ^ (d & (b ^ c))
SCREAMING_SNAKE_CASE_ = (5 * i + 1) % 1_6
elif i <= 4_7:
SCREAMING_SNAKE_CASE_ = b ^ c ^ d
SCREAMING_SNAKE_CASE_ = (3 * i + 5) % 1_6
else:
SCREAMING_SNAKE_CASE_ = c ^ (b | not_aa(__UpperCamelCase ))
SCREAMING_SNAKE_CASE_ = (7 * i) % 1_6
SCREAMING_SNAKE_CASE_ = (f + a + added_consts[i] + block_words[g]) % 2**3_2
SCREAMING_SNAKE_CASE_ = d
SCREAMING_SNAKE_CASE_ = c
SCREAMING_SNAKE_CASE_ = b
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , left_rotate_aa(__UpperCamelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 305
| 1
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
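# Keras layer that runs GPT-2 byte-pair tokenization in-graph via keras-nlp's BytePairTokenizer.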
class lowerCamelCase (tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Optional[int] , __magic_name__ : Dict[str, int] , __magic_name__ : List[str] , __magic_name__ : int = None , __magic_name__ : int = None ) -> Dict:
super().__init__()
SCREAMING_SNAKE_CASE_ = pad_token_id
SCREAMING_SNAKE_CASE_ = max_length
SCREAMING_SNAKE_CASE_ = vocab
SCREAMING_SNAKE_CASE_ = merges
SCREAMING_SNAKE_CASE_ = BytePairTokenizer(__magic_name__ , __magic_name__ , sequence_length=__magic_name__ )
@classmethod
def __A ( cls : List[Any] , __magic_name__ : GPTaTokenizer , *__magic_name__ : str , **__magic_name__ : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = [" ".join(__magic_name__ ) for m in tokenizer.bpe_ranks.keys()]
SCREAMING_SNAKE_CASE_ = tokenizer.get_vocab()
return cls(__magic_name__ , __magic_name__ , *__magic_name__ , **__magic_name__ )
@classmethod
def __A ( cls : int , __magic_name__ : Union[str, os.PathLike] , *__magic_name__ : Optional[Any] , **__magic_name__ : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE_ = GPTaTokenizer.from_pretrained(__magic_name__ , *__magic_name__ , **__magic_name__ )
return cls.from_tokenizer(__magic_name__ , *__magic_name__ , **__magic_name__ )
@classmethod
def __A ( cls : List[Any] , __magic_name__ : Tuple ) -> Union[str, Any]:
return cls(**__magic_name__ )
def __A ( self : Any ) -> Dict:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def __A ( self : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : int = None ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.tf_tokenizer(__magic_name__ )
SCREAMING_SNAKE_CASE_ = tf.ones_like(__magic_name__ )
if self.pad_token_id is not None:
# pad the tokens up to max length
SCREAMING_SNAKE_CASE_ = max_length if max_length is not None else self.max_length
if max_length is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = pad_model_inputs(
__magic_name__ , max_seq_length=__magic_name__ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 305
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : int ) -> Any:
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = BlipImageProcessor()
SCREAMING_SNAKE_CASE_ = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
SCREAMING_SNAKE_CASE_ = BlipaProcessor(__magic_name__ , __magic_name__ )
processor.save_pretrained(self.tmpdirname )
def __A ( self : str , **__magic_name__ : int ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer
def __A ( self : Dict , **__magic_name__ : List[Any] ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor
def __A ( self : int ) -> Any:
shutil.rmtree(self.tmpdirname )
def __A ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE_ = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
SCREAMING_SNAKE_CASE_ = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
SCREAMING_SNAKE_CASE_ = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def __A ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = image_processor(__magic_name__ , return_tensors="np" )
SCREAMING_SNAKE_CASE_ = processor(images=__magic_name__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def __A ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ = processor.batch_decode(__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def __A ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 305
| 1
|
from __future__ import annotations
import pandas as pd
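# Preemptive shortest-job-first: at every time unit, run the arrived process with the least remaining burst time.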
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = [0] * no_of_processes
SCREAMING_SNAKE_CASE_ = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = burst_time[i]
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 9_9_9_9_9_9_9_9_9
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(__UpperCamelCase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
SCREAMING_SNAKE_CASE_ = remaining_time[j]
SCREAMING_SNAKE_CASE_ = j
SCREAMING_SNAKE_CASE_ = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
SCREAMING_SNAKE_CASE_ = remaining_time[short]
if minm == 0:
SCREAMING_SNAKE_CASE_ = 9_9_9_9_9_9_9_9_9
if remaining_time[short] == 0:
complete += 1
SCREAMING_SNAKE_CASE_ = False
# Find finish time of current process
SCREAMING_SNAKE_CASE_ = increment_time + 1
# Calculate waiting time
SCREAMING_SNAKE_CASE_ = finish_time - arrival_time[short]
SCREAMING_SNAKE_CASE_ = finar - burst_time[short]
if waiting_time[short] < 0:
SCREAMING_SNAKE_CASE_ = 0
# Increment time
increment_time += 1
return waiting_time
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = [0] * no_of_processes
for i in range(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = burst_time[i] + waiting_time[i]
return turn_around_time
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
for i in range(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = total_waiting_time + waiting_time[i]
SCREAMING_SNAKE_CASE_ = total_turn_around_time + turn_around_time[i]
print(F'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' )
print("Average turn around time =" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("Enter how many process you want to analyze")
A : int = int(input())
A : Union[str, Any] = [0] * no_of_processes
A : Optional[int] = [0] * no_of_processes
A : List[Any] = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("Enter the arrival time and burst time for process:--" + str(i + 1))
A , A : Tuple = map(int, input().split())
A : Any = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
A : List[str] = burst_time
A : Optional[Any] = no_of_processes
A : Any = waiting_time
A : List[str] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
A : Any = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"Process",
"BurstTime",
"ArrivalTime",
"WaitingTime",
"TurnAroundTime",
],
)
# Printing the dataFrame
pd.set_option("display.max_rows", fcfs.shape[0] + 1)
print(fcfs)
| 305
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
A : List[Any] = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = ["DPTFeatureExtractor"]
A : str = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
A : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 305
| 1
|
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def a__ ( __UpperCamelCase ):
# vision encoder
if "img_encoder.pos_embed" in name:
SCREAMING_SNAKE_CASE_ = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_ = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_ = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
SCREAMING_SNAKE_CASE_ = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
SCREAMING_SNAKE_CASE_ = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
SCREAMING_SNAKE_CASE_ = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
SCREAMING_SNAKE_CASE_ = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
SCREAMING_SNAKE_CASE_ = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
SCREAMING_SNAKE_CASE_ = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
SCREAMING_SNAKE_CASE_ = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
SCREAMING_SNAKE_CASE_ = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
SCREAMING_SNAKE_CASE_ = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
SCREAMING_SNAKE_CASE_ = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
SCREAMING_SNAKE_CASE_ = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
SCREAMING_SNAKE_CASE_ = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
SCREAMING_SNAKE_CASE_ = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
SCREAMING_SNAKE_CASE_ = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
SCREAMING_SNAKE_CASE_ = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
SCREAMING_SNAKE_CASE_ = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
SCREAMING_SNAKE_CASE_ = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
SCREAMING_SNAKE_CASE_ = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
SCREAMING_SNAKE_CASE_ = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
SCREAMING_SNAKE_CASE_ = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
SCREAMING_SNAKE_CASE_ = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
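# Split fused attention projections (vision "qkv", text "in_proj") into separate q/k/v tensors and rename the remaining keys.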
def a__ ( __UpperCamelCase , __UpperCamelCase ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_ = orig_state_dict.pop(__UpperCamelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
SCREAMING_SNAKE_CASE_ = key.split("." )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = int(key_split[2] ), int(key_split[4] )
SCREAMING_SNAKE_CASE_ = config.vision_config.hidden_size
if "weight" in key:
SCREAMING_SNAKE_CASE_ = val[:dim, :]
SCREAMING_SNAKE_CASE_ = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_ = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE_ = val[:dim]
SCREAMING_SNAKE_CASE_ = val[dim : dim * 2]
SCREAMING_SNAKE_CASE_ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
SCREAMING_SNAKE_CASE_ = key.split("." )
SCREAMING_SNAKE_CASE_ = int(key_split[3] )
SCREAMING_SNAKE_CASE_ = config.text_config.hidden_size
if "weight" in key:
SCREAMING_SNAKE_CASE_ = val[:dim, :]
SCREAMING_SNAKE_CASE_ = val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE_ = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE_ = val[:dim]
SCREAMING_SNAKE_CASE_ = val[dim : dim * 2]
SCREAMING_SNAKE_CASE_ = val[-dim:]
else:
SCREAMING_SNAKE_CASE_ = rename_key(__UpperCamelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
SCREAMING_SNAKE_CASE_ = val.squeeze_()
else:
SCREAMING_SNAKE_CASE_ = val
return orig_state_dict
def a__ ( ):
SCREAMING_SNAKE_CASE_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="groupvit-gcc-yfcc" , __UpperCamelCase=False ):
SCREAMING_SNAKE_CASE_ = GroupViTConfig()
SCREAMING_SNAKE_CASE_ = GroupViTModel(__UpperCamelCase ).eval()
SCREAMING_SNAKE_CASE_ = torch.load(__UpperCamelCase , map_location="cpu" )["model"]
SCREAMING_SNAKE_CASE_ = convert_state_dict(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__UpperCamelCase ) == 0)
# verify result
SCREAMING_SNAKE_CASE_ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = processor(text=["a photo of a cat", "a photo of a dog"] , images=__UpperCamelCase , padding=__UpperCamelCase , return_tensors="pt" )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__UpperCamelCase )
if model_name == "groupvit-gcc-yfcc":
SCREAMING_SNAKE_CASE_ = torch.tensor([[13.3523, 6.3629]] )
elif model_name == "groupvit-gcc-redcaps":
SCREAMING_SNAKE_CASE_ = torch.tensor([[16.1873, 8.6230]] )
else:
raise ValueError(F'''Model name {model_name} not supported.''' )
assert torch.allclose(outputs.logits_per_image , __UpperCamelCase , atol=1E-3 )
processor.save_pretrained(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
print("Successfully saved processor and model to" , __UpperCamelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(__UpperCamelCase , organization="nielsr" )
model.push_to_hub(__UpperCamelCase , organization="nielsr" )
if __name__ == "__main__":
A : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
A : Optional[Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 305
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
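# Group dictionary words by their sorted-letter signature; words sharing a signature are anagrams.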
def a__ ( __UpperCamelCase ):
return "".join(sorted(__UpperCamelCase ) )
def a__ ( __UpperCamelCase ):
return word_by_signature[signature(__UpperCamelCase )]
A : str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
A : int = sorted({word.strip().lower() for word in data.splitlines()})
A : Tuple = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
A : Union[str, Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
| 305
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
A : Any = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
A : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 305
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : int = logging.get_logger(__name__)
A : str = {
"kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''align_text_model'''
def __init__( self : Optional[Any] , __magic_name__ : Union[str, Any]=30_522 , __magic_name__ : Tuple=768 , __magic_name__ : List[str]=12 , __magic_name__ : Optional[Any]=12 , __magic_name__ : str=3_072 , __magic_name__ : Dict="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : List[str]=512 , __magic_name__ : Any=2 , __magic_name__ : Optional[Any]=0.02 , __magic_name__ : int=1e-12 , __magic_name__ : str=0 , __magic_name__ : Optional[Any]="absolute" , __magic_name__ : Optional[Any]=True , **__magic_name__ : Tuple , ) -> Union[str, Any]:
super().__init__(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = position_embedding_type
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = pad_token_id
@classmethod
def __A ( cls : Any , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : Optional[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
SCREAMING_SNAKE_CASE_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''align_vision_model'''
def __init__( self : List[str] , __magic_name__ : int = 3 , __magic_name__ : int = 600 , __magic_name__ : float = 2.0 , __magic_name__ : float = 3.1 , __magic_name__ : int = 8 , __magic_name__ : List[int] = [3, 3, 5, 3, 5, 5, 3] , __magic_name__ : List[int] = [32, 16, 24, 40, 80, 112, 192] , __magic_name__ : List[int] = [16, 24, 40, 80, 112, 192, 320] , __magic_name__ : List[int] = [] , __magic_name__ : List[int] = [1, 2, 2, 2, 1, 2, 1] , __magic_name__ : List[int] = [1, 2, 2, 3, 3, 4, 1] , __magic_name__ : List[int] = [1, 6, 6, 6, 6, 6, 6] , __magic_name__ : float = 0.25 , __magic_name__ : str = "swish" , __magic_name__ : int = 2_560 , __magic_name__ : str = "mean" , __magic_name__ : float = 0.02 , __magic_name__ : float = 0.001 , __magic_name__ : float = 0.99 , __magic_name__ : float = 0.2 , **__magic_name__ : List[Any] , ) -> Tuple:
super().__init__(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = width_coefficient
SCREAMING_SNAKE_CASE_ = depth_coefficient
SCREAMING_SNAKE_CASE_ = depth_divisor
SCREAMING_SNAKE_CASE_ = kernel_sizes
SCREAMING_SNAKE_CASE_ = in_channels
SCREAMING_SNAKE_CASE_ = out_channels
SCREAMING_SNAKE_CASE_ = depthwise_padding
SCREAMING_SNAKE_CASE_ = strides
SCREAMING_SNAKE_CASE_ = num_block_repeats
SCREAMING_SNAKE_CASE_ = expand_ratios
SCREAMING_SNAKE_CASE_ = squeeze_expansion_ratio
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dim
SCREAMING_SNAKE_CASE_ = pooling_type
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = batch_norm_eps
SCREAMING_SNAKE_CASE_ = batch_norm_momentum
SCREAMING_SNAKE_CASE_ = drop_connect_rate
SCREAMING_SNAKE_CASE_ = sum(__magic_name__ ) * 4
@classmethod
def __A ( cls : List[str] , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : Dict ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
SCREAMING_SNAKE_CASE_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''align'''
lowerCamelCase__ = True
def __init__( self : Optional[Any] , __magic_name__ : Dict=None , __magic_name__ : List[Any]=None , __magic_name__ : str=640 , __magic_name__ : Any=1.0 , __magic_name__ : Dict=0.02 , **__magic_name__ : Union[str, Any] , ) -> int:
super().__init__(**__magic_name__ )
if text_config is None:
SCREAMING_SNAKE_CASE_ = {}
logger.info("text_config is None. Initializing the AlignTextConfig with default values." )
if vision_config is None:
SCREAMING_SNAKE_CASE_ = {}
logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )
SCREAMING_SNAKE_CASE_ = AlignTextConfig(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = AlignVisionConfig(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = projection_dim
SCREAMING_SNAKE_CASE_ = temperature_init_value
SCREAMING_SNAKE_CASE_ = initializer_range
@classmethod
def __A ( cls : List[str] , __magic_name__ : AlignTextConfig , __magic_name__ : AlignVisionConfig , **__magic_name__ : Tuple ) -> Any:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__magic_name__ )
def __A ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.text_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.__class__.model_type
return output
| 305
| 1
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
lowerCamelCase__ = Features({'''text''': Value('''string''' )} )
lowerCamelCase__ = Features({'''summary''': Value('''string''' )} )
lowerCamelCase__ = "text"
lowerCamelCase__ = "summary"
@property
def __A ( self : Dict ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
| 305
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def a__ ( __UpperCamelCase ):
return x + 2
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = "x = 3"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3} )
SCREAMING_SNAKE_CASE_ = "x = y"
SCREAMING_SNAKE_CASE_ = {"y": 5}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 5, "y": 5} )
def __A ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE_ = "y = add_two(x)"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result is None
assert "tried to execute add_two" in out.out
def __A ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE_ = "x = 3"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3} )
def __A ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def __A ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = "x = 3\ny = 5"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
def __A ( self : Any ) -> List[str]:
SCREAMING_SNAKE_CASE_ = "text = f'This is x: {x}.'"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__magic_name__ , {"x": 3, "text": "This is x: 3."} )
def __A ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE_ = "if x <= 3:\n y = 2\nelse:\n y = 5"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 2} )
SCREAMING_SNAKE_CASE_ = {"x": 8}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 8, "y": 5} )
def __A ( self : str ) -> str:
SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
self.assertListEqual(__magic_name__ , [3, 5] )
self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} )
def __A ( self : Union[str, Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = "y = x"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 3} )
def __A ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]\ntest_list[1]"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} )
SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def __A ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = "x = 0\nfor i in range(3):\n x = i"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"range": range} , state=__magic_name__ )
assert result == 2
self.assertDictEqual(__magic_name__ , {"x": 2, "i": 2} )
| 305
| 1
|
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = BertJapaneseTokenizer
lowerCamelCase__ = False
lowerCamelCase__ = True
def __A ( self : List[Any] ) -> Dict:
super().setUp()
SCREAMING_SNAKE_CASE_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"こんにちは",
"こん",
"にちは",
"ばんは",
"##こん",
"##にちは",
"##ばんは",
"世界",
"##世界",
"、",
"##、",
"。",
"##。",
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __A ( self : Tuple , __magic_name__ : str ) -> List[str]:
SCREAMING_SNAKE_CASE_ = "こんにちは、世界。 \nこんばんは、世界。"
SCREAMING_SNAKE_CASE_ = "こんにちは 、 世界 。 こんばんは 、 世界 。"
return input_text, output_text
def __A ( self : int , __magic_name__ : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_input_output_texts(__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )
return text, ids
def __A ( self : List[str] ) -> List[Any]:
pass # TODO add if relevant
def __A ( self : Any ) -> Optional[int]:
pass # TODO add if relevant
def __A ( self : Dict ) -> int:
pass # TODO add if relevant
def __A ( self : List[str] ) -> Any:
SCREAMING_SNAKE_CASE_ = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
self.assertListEqual(__magic_name__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __A ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
self.assertIsNotNone(__magic_name__ )
SCREAMING_SNAKE_CASE_ = "こんにちは、世界。\nこんばんは、世界。"
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(__magic_name__ , "wb" ) as handle:
pickle.dump(__magic_name__ , __magic_name__ )
with open(__magic_name__ , "rb" ) as handle:
SCREAMING_SNAKE_CASE_ = pickle.load(__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer_new.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def __A ( self : Tuple ) -> str:
SCREAMING_SNAKE_CASE_ = MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def __A ( self : Dict ) -> List[Any]:
try:
SCREAMING_SNAKE_CASE_ = MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def __A ( self : Any ) -> Optional[Any]:
try:
SCREAMING_SNAKE_CASE_ = MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def __A ( self : int ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = MecabTokenizer(do_lower_case=__magic_name__ , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def __A ( self : int ) -> Any:
try:
SCREAMING_SNAKE_CASE_ = MecabTokenizer(
do_lower_case=__magic_name__ , normalize_text=__magic_name__ , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def __A ( self : Any ) -> List[str]:
SCREAMING_SNAKE_CASE_ = MecabTokenizer(normalize_text=__magic_name__ , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def __A ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
self.assertIsNotNone(__magic_name__ )
SCREAMING_SNAKE_CASE_ = "こんにちは、世界。\nこんばんは、世界。"
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(__magic_name__ , "wb" ) as handle:
pickle.dump(__magic_name__ , __magic_name__ )
with open(__magic_name__ , "rb" ) as handle:
SCREAMING_SNAKE_CASE_ = pickle.load(__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer_new.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
@require_sudachi
def __A ( self : Any ) -> int:
SCREAMING_SNAKE_CASE_ = SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def __A ( self : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE_ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def __A ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def __A ( self : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def __A ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = SudachiTokenizer(do_lower_case=__magic_name__ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def __A ( self : Dict ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = SudachiTokenizer(normalize_text=__magic_name__ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def __A ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = SudachiTokenizer(trim_whitespace=__magic_name__ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def __A ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
self.assertIsNotNone(__magic_name__ )
SCREAMING_SNAKE_CASE_ = "こんにちは、世界。\nこんばんは、世界。"
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(__magic_name__ , "wb" ) as handle:
pickle.dump(__magic_name__ , __magic_name__ )
with open(__magic_name__ , "rb" ) as handle:
SCREAMING_SNAKE_CASE_ = pickle.load(__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer_new.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
@require_jumanpp
def __A ( self : Union[str, Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def __A ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = JumanppTokenizer(do_lower_case=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def __A ( self : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE_ = JumanppTokenizer(normalize_text=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def __A ( self : Union[str, Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = JumanppTokenizer(trim_whitespace=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def __A ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def __A ( self : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
SCREAMING_SNAKE_CASE_ = {}
for i, token in enumerate(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = i
SCREAMING_SNAKE_CASE_ = WordpieceTokenizer(vocab=__magic_name__ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
def __A ( self : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
SCREAMING_SNAKE_CASE_ = tokenizer.subword_tokenizer
SCREAMING_SNAKE_CASE_ = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
self.assertListEqual(__magic_name__ , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
SCREAMING_SNAKE_CASE_ = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
self.assertListEqual(__magic_name__ , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )
def __A ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
SCREAMING_SNAKE_CASE_ = tokenizer.encode("ありがとう。" , add_special_tokens=__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.encode("どういたしまして。" , add_special_tokens=__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = BertJapaneseTokenizer
lowerCamelCase__ = False
def __A ( self : Dict ) -> List[Any]:
super().setUp()
SCREAMING_SNAKE_CASE_ = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __A ( self : Optional[Any] , **__magic_name__ : Dict ) -> Any:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **__magic_name__ )
def __A ( self : Tuple , __magic_name__ : int ) -> Any:
SCREAMING_SNAKE_CASE_ = "こんにちは、世界。 \nこんばんは、世界。"
SCREAMING_SNAKE_CASE_ = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
return input_text, output_text
def __A ( self : Optional[Any] ) -> str:
pass # TODO add if relevant
def __A ( self : List[Any] ) -> List[str]:
pass # TODO add if relevant
def __A ( self : List[str] ) -> Optional[Any]:
pass # TODO add if relevant
def __A ( self : Dict ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
self.assertListEqual(
__magic_name__ , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __A ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE_ = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
SCREAMING_SNAKE_CASE_ = {}
for i, token in enumerate(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = i
SCREAMING_SNAKE_CASE_ = CharacterTokenizer(vocab=__magic_name__ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )
def __A ( self : Dict ) -> str:
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
SCREAMING_SNAKE_CASE_ = tokenizer.encode("ありがとう。" , add_special_tokens=__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.encode("どういたしまして。" , add_special_tokens=__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = "cl-tohoku/bert-base-japanese"
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[str] ) -> Any:
SCREAMING_SNAKE_CASE_ = "cl-tohoku/bert-base-japanese"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertTokenizer.from_pretrained(__magic_name__ )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
SCREAMING_SNAKE_CASE_ = "bert-base-cased"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertJapaneseTokenizer.from_pretrained(__magic_name__ )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
| 305
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
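# Ordinary least squares on [1, date, match] rows via the normal equations beta = (X^T X)^-1 X^T y.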
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__UpperCamelCase )] )
SCREAMING_SNAKE_CASE_ = np.array(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __UpperCamelCase ) ) , x.transpose() ) , __UpperCamelCase )
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2] )
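# SARIMAX(1, 2, 1) with a weekly (period 7) seasonal component; match counts enter as an exogenous regressor.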
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = (1, 2, 1)
SCREAMING_SNAKE_CASE_ = (1, 1, 0, 7)
SCREAMING_SNAKE_CASE_ = SARIMAX(
__UpperCamelCase , exog=__UpperCamelCase , order=__UpperCamelCase , seasonal_order=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = model.fit(disp=__UpperCamelCase , maxiter=6_0_0 , method="nm" )
SCREAMING_SNAKE_CASE_ = model_fit.predict(1 , len(__UpperCamelCase ) , exog=[test_match] )
return result[0]
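# RBF-kernel support vector regression on the (match, day) features.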
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = regressor.predict(__UpperCamelCase )
return y_pred[0]
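# Returns a lower bound for outliers: q1 - 0.1 * IQR of the sorted training values.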
def a__ ( __UpperCamelCase ):
train_user.sort()
    q1 = np.percentile(__UpperCamelCase , 2_5 )
    q3 = np.percentile(__UpperCamelCase , 7_5 )
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
return low_lim
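# A forecast above the actual value, or off by more than 0.1, votes "not safe"; returns True when safe votes win.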
def a__ ( __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
for i in list_vote:
if i > actual_result:
SCREAMING_SNAKE_CASE_ = not_safe + 1
else:
if abs(abs(__UpperCamelCase ) - abs(__UpperCamelCase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
A : Dict = [[1_82_31, 0.0, 1], [2_26_21, 1.0, 2], [1_56_75, 0.0, 3], [2_35_83, 1.0, 4]]
A : Optional[Any] = pd.DataFrame(
data_input, columns=["total_user", "total_even", "days"]
)
A : Union[str, Any] = Normalizer().fit_transform(data_input_df.values)
# split data
A : Optional[int] = normalize_df[:, 2].tolist()
A : List[str] = normalize_df[:, 0].tolist()
A : int = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
A : int = normalize_df[:, [1, 2]].tolist()
A : Tuple = x[: len(x) - 1]
A : str = x[len(x) - 1 :]
# for linear regression & sarimax
A : Tuple = total_date[: len(total_date) - 1]
A : Optional[int] = total_user[: len(total_user) - 1]
A : str = total_match[: len(total_match) - 1]
A : List[Any] = total_date[len(total_date) - 1 :]
A : List[Any] = total_user[len(total_user) - 1 :]
A : Optional[Any] = total_match[len(total_match) - 1 :]
# voting system with forecasting
A : Optional[int] = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
A : str = "" if data_safety_checker(res_vote, tst_user) else "not "
print("Today's data is {not_str}safe.")
| 305
| 1
|
import math
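# Jump search: probe a sorted array in blocks of size sqrt(n), then scan linearly within the candidate block.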
def a__ ( __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = len(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = int(math.floor(math.sqrt(__UpperCamelCase ) ) )
SCREAMING_SNAKE_CASE_ = 0
while arr[min(__UpperCamelCase , __UpperCamelCase ) - 1] < x:
SCREAMING_SNAKE_CASE_ = step
step += int(math.floor(math.sqrt(__UpperCamelCase ) ) )
if prev >= n:
return -1
while arr[prev] < x:
SCREAMING_SNAKE_CASE_ = prev + 1
if prev == min(__UpperCamelCase , __UpperCamelCase ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
A : str = input("Enter numbers separated by a comma:\n").strip()
A : Union[str, Any] = [int(item) for item in user_input.split(",")]
A : Optional[int] = int(input("Enter the number to be searched:\n"))
A : int = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f"Number {x} is at index {res}")
| 305
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A : List[str] = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Any = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
A : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
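# Usage sketch: with the lazy module in place, submodule symbols resolve on
# first attribute access, so a plain top-level import works once torch is
# installed.
from transformers import SwinConfig, SwinModel
model = SwinModel(SwinConfig())  # randomly initialised Swin encoder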
import torch
def a__ ( ):
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE_ = torch.cuda.device_count()
else:
SCREAMING_SNAKE_CASE_ = 0
print(F'''Successfully ran on {num_gpus} GPUs''' )
if __name__ == "__main__":
main()
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
A : Union[str, Any] = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@classmethod
def __A ( cls : Any ) -> Dict:
SCREAMING_SNAKE_CASE_ = TOKEN
HfFolder.save_token(__magic_name__ )
@classmethod
def __A ( cls : Optional[int] ) -> Tuple:
try:
delete_repo(token=cls._token , repo_id="test-model-flax" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" )
except HTTPError:
pass
def __A ( self : str ) -> str:
SCREAMING_SNAKE_CASE_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ )
model.push_to_hub("test-model-flax" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="test-model-flax" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__magic_name__ , repo_id="test-model-flax" , push_to_hub=__magic_name__ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' )
def __A ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ )
model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
__magic_name__ , repo_id="valid_org/test-model-flax-org" , push_to_hub=__magic_name__ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' )
def a__ ( __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = True
    SCREAMING_SNAKE_CASE_ = flatten_dict(model1.params )
    SCREAMING_SNAKE_CASE_ = flatten_dict(model2.params )
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key] ) ) > 1E-4:
SCREAMING_SNAKE_CASE_ = False
return models_are_equal
@require_flax
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE_ = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ )
SCREAMING_SNAKE_CASE_ = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__magic_name__ , __magic_name__ ) )
with self.assertRaises(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertTrue(check_models_equal(__magic_name__ , __magic_name__ ) )
def __A ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ )
SCREAMING_SNAKE_CASE_ = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__magic_name__ , __magic_name__ ) , max_shard_size="10KB" )
with self.assertRaises(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertTrue(check_models_equal(__magic_name__ , __magic_name__ ) )
def __A ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE_ = "bert"
SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-bert-subfolder"
with self.assertRaises(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def __A ( self : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE_ = "bert"
SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
with self.assertRaises(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertIsNotNone(__magic_name__ )
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
A : Union[str, Any] = logging.get_logger(__name__)
set_seed(7_70)
A : Optional[int] = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
A : Any = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
A : List[str] = os.path.dirname(os.path.abspath(__file__))
A : Dict = os.path.join(os.path.expanduser("~"), ".cache")
A : List[str] = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def a__ ( __UpperCamelCase , __UpperCamelCase=False ):
SCREAMING_SNAKE_CASE_ = model_type
if use_small:
key += "_small"
return os.path.join(__UpperCamelCase , REMOTE_MODEL_PATHS[key]["file_name"] )
def a__ ( __UpperCamelCase , __UpperCamelCase ):
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
hf_hub_download(repo_id=__UpperCamelCase , filename=__UpperCamelCase , local_dir=__UpperCamelCase )
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase="text" ):
if model_type == "text":
SCREAMING_SNAKE_CASE_ = BarkSemanticModel
SCREAMING_SNAKE_CASE_ = BarkSemanticConfig
SCREAMING_SNAKE_CASE_ = BarkSemanticGenerationConfig
elif model_type == "coarse":
SCREAMING_SNAKE_CASE_ = BarkCoarseModel
SCREAMING_SNAKE_CASE_ = BarkCoarseConfig
SCREAMING_SNAKE_CASE_ = BarkCoarseGenerationConfig
elif model_type == "fine":
SCREAMING_SNAKE_CASE_ = BarkFineModel
SCREAMING_SNAKE_CASE_ = BarkFineConfig
SCREAMING_SNAKE_CASE_ = BarkFineGenerationConfig
else:
raise NotImplementedError()
SCREAMING_SNAKE_CASE_ = F'''{model_type}_small''' if use_small else model_type
SCREAMING_SNAKE_CASE_ = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(__UpperCamelCase ):
logger.info(F'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info["repo_id"] , model_info["file_name"] )
SCREAMING_SNAKE_CASE_ = torch.load(__UpperCamelCase , map_location=__UpperCamelCase )
# this is a hack
SCREAMING_SNAKE_CASE_ = checkpoint["model_args"]
if "input_vocab_size" not in model_args:
SCREAMING_SNAKE_CASE_ = model_args["vocab_size"]
SCREAMING_SNAKE_CASE_ = model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
SCREAMING_SNAKE_CASE_ = model_args.pop("n_head" )
SCREAMING_SNAKE_CASE_ = model_args.pop("n_embd" )
SCREAMING_SNAKE_CASE_ = model_args.pop("n_layer" )
SCREAMING_SNAKE_CASE_ = ConfigClass(**checkpoint["model_args"] )
SCREAMING_SNAKE_CASE_ = ModelClass(config=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = GenerationConfigClass()
SCREAMING_SNAKE_CASE_ = model_generation_config
SCREAMING_SNAKE_CASE_ = checkpoint["model"]
# fixup checkpoint
SCREAMING_SNAKE_CASE_ = "_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(__UpperCamelCase ):
# replace part of the key with corresponding layer name in HF implementation
SCREAMING_SNAKE_CASE_ = k[len(__UpperCamelCase ) :]
for old_layer_name in new_layer_name_dict:
SCREAMING_SNAKE_CASE_ = new_k.replace(__UpperCamelCase , new_layer_name_dict[old_layer_name] )
SCREAMING_SNAKE_CASE_ = state_dict.pop(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = set(state_dict.keys() ) - set(model.state_dict().keys() )
SCREAMING_SNAKE_CASE_ = {k for k in extra_keys if not k.endswith(".attn.bias" )}
SCREAMING_SNAKE_CASE_ = set(model.state_dict().keys() ) - set(state_dict.keys() )
SCREAMING_SNAKE_CASE_ = {k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(__UpperCamelCase ) != 0:
raise ValueError(F'''extra keys found: {extra_keys}''' )
if len(__UpperCamelCase ) != 0:
raise ValueError(F'''missing keys: {missing_keys}''' )
model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = model.num_parameters(exclude_embeddings=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = checkpoint["best_val_loss"].item()
logger.info(F'''model loaded: {round(n_params/1E6 , 1 )}M params, {round(__UpperCamelCase , 3 )} loss''' )
model.eval()
model.to(__UpperCamelCase )
del checkpoint, state_dict
return model
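# Standalone sketch of the key fixup above, applied to one hypothetical
# checkpoint key: strip the torch.compile "_orig_mod." prefix, then apply the
# suno-to-HF renames from new_layer_name_dict.
_demo_key = "_orig_mod.transformer.h.0.attn.c_attn.weight"
_demo_key = _demo_key[len("_orig_mod.") :]
for _old_name, _new_name in new_layer_name_dict.items():
    _demo_key = _demo_key.replace(_old_name, _new_name)
print(_demo_key)  # layers.0.attn.att_proj.weight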
def a__ ( __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
SCREAMING_SNAKE_CASE_ = "cpu" # do conversion on cpu
SCREAMING_SNAKE_CASE_ = _get_ckpt_path(__UpperCamelCase , use_small=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = _load_model(__UpperCamelCase , __UpperCamelCase , model_type=__UpperCamelCase , use_small=__UpperCamelCase )
# load bark initial model
SCREAMING_SNAKE_CASE_ = _bark_load_model(__UpperCamelCase , "cpu" , model_type=__UpperCamelCase , use_small=__UpperCamelCase )
if model_type == "text":
SCREAMING_SNAKE_CASE_ = bark_model["model"]
if model.num_parameters(exclude_embeddings=__UpperCamelCase ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
SCREAMING_SNAKE_CASE_ = 5
SCREAMING_SNAKE_CASE_ = 1_0
if model_type in ["text", "coarse"]:
SCREAMING_SNAKE_CASE_ = torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
SCREAMING_SNAKE_CASE_ = bark_model(__UpperCamelCase )[0]
SCREAMING_SNAKE_CASE_ = model(__UpperCamelCase )
# take last logits
SCREAMING_SNAKE_CASE_ = output_new_model_total.logits[:, [-1], :]
else:
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 8
SCREAMING_SNAKE_CASE_ = torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
SCREAMING_SNAKE_CASE_ = model(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = bark_model(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("initial and new outputs are not equal" )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = BarkSemanticConfig.from_pretrained(os.path.join(__UpperCamelCase , "config.json" ) )
SCREAMING_SNAKE_CASE_ = BarkCoarseConfig.from_pretrained(os.path.join(__UpperCamelCase , "config.json" ) )
SCREAMING_SNAKE_CASE_ = BarkFineConfig.from_pretrained(os.path.join(__UpperCamelCase , "config.json" ) )
SCREAMING_SNAKE_CASE_ = EncodecConfig.from_pretrained("facebook/encodec_24khz" )
SCREAMING_SNAKE_CASE_ = BarkSemanticModel.from_pretrained(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = BarkCoarseModel.from_pretrained(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = BarkFineModel.from_pretrained(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = EncodecModel.from_pretrained("facebook/encodec_24khz" )
SCREAMING_SNAKE_CASE_ = BarkConfig.from_sub_model_configs(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
SCREAMING_SNAKE_CASE_ = BarkModel(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = semantic
SCREAMING_SNAKE_CASE_ = coarseAcoustic
SCREAMING_SNAKE_CASE_ = fineAcoustic
SCREAMING_SNAKE_CASE_ = codec
SCREAMING_SNAKE_CASE_ = bark_generation_config
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
bark.save_pretrained(__UpperCamelCase , repo_id=__UpperCamelCase , push_to_hub=__UpperCamelCase )
if __name__ == "__main__":
A : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
A : Tuple = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
A : Union[str, Any] = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def a__ ( ):
SCREAMING_SNAKE_CASE_ = _ask_options(
"In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
SCREAMING_SNAKE_CASE_ = get_sagemaker_input()
else:
SCREAMING_SNAKE_CASE_ = get_cluster_input()
return config
def a__ ( __UpperCamelCase=None ):
if subparsers is not None:
SCREAMING_SNAKE_CASE_ = subparsers.add_parser("config" , description=__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser("Accelerate config command" , description=__UpperCamelCase )
parser.add_argument(
"--config_file" , default=__UpperCamelCase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=__UpperCamelCase )
return parser
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = get_user_input()
if args.config_file is not None:
SCREAMING_SNAKE_CASE_ = args.config_file
else:
if not os.path.isdir(__UpperCamelCase ):
os.makedirs(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = default_yaml_config_file
if config_file.endswith(".json" ):
config.to_json_file(__UpperCamelCase )
else:
config.to_yaml_file(__UpperCamelCase )
print(F'''accelerate configuration saved at {config_file}''' )
def a__ ( ):
SCREAMING_SNAKE_CASE_ = config_command_parser()
SCREAMING_SNAKE_CASE_ = parser.parse_args()
config_command(__UpperCamelCase )
if __name__ == "__main__":
main()
def a__ ( __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = [1]
for i in range(2 , __UpperCamelCase ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = list(range(__UpperCamelCase ) )
# Find permutation
while factorials:
SCREAMING_SNAKE_CASE_ = factorials.pop()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = divmod(k , factorial )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
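# Worked sketch with a hypothetical name (the original identifier was
# stripped): for n = 3, k = 4 decodes through the factorial number system as
# 4 = 2 * 2! + 0 * 1!, so the 4th (0-indexed) permutation of [0, 1, 2] comes
# out as [2, 0, 1]:
#     kth_permutation(4, 3) == [2, 0, 1]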
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
lowerCamelCase__ = Features({'''text''': Value('''string''' )} )
lowerCamelCase__ = Features({'''summary''': Value('''string''' )} )
lowerCamelCase__ = "text"
lowerCamelCase__ = "summary"
@property
def __A ( self : Dict ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
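# Usage sketch, assuming this is the `Summarization` task template from
# datasets: the mapping above renames dataset columns to the canonical
# "text"/"summary" pair, e.g.
#     Summarization(text_column="article", summary_column="highlights")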
# using dfs for finding eulerian path traversal
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None ):
SCREAMING_SNAKE_CASE_ = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True, True
            SCREAMING_SNAKE_CASE_ = dfs(v , graph , visited_edge , path )
return path
def a__ ( __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = -1
for i in range(__UpperCamelCase ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
SCREAMING_SNAKE_CASE_ = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def a__ ( __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = check_circuit_or_path(__UpperCamelCase , __UpperCamelCase )
if check == 3:
print("graph is not Eulerian" )
print("no path" )
return
SCREAMING_SNAKE_CASE_ = 1
if check == 2:
SCREAMING_SNAKE_CASE_ = odd_node
print("graph has a Euler path" )
if check == 1:
print("graph has a Euler cycle" )
    SCREAMING_SNAKE_CASE_ = dfs(start_node , graph , visited_edge )
print(__UpperCamelCase )
def a__ ( ):
SCREAMING_SNAKE_CASE_ = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
SCREAMING_SNAKE_CASE_ = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
SCREAMING_SNAKE_CASE_ = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
SCREAMING_SNAKE_CASE_ = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
SCREAMING_SNAKE_CASE_ = {
1: [],
2: []
# all degree is zero
}
SCREAMING_SNAKE_CASE_ = 1_0
    check_euler(g1 , max_node )
    check_euler(g2 , max_node )
    check_euler(g3 , max_node )
    check_euler(g4 , max_node )
    check_euler(g5 , max_node )
if __name__ == "__main__":
main()
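# Degree-parity sketch: check_circuit_or_path returns 1 when every vertex has
# even degree (Euler circuit exists) and 2 when exactly two vertices have odd
# degree (Euler path exists).
example_graph = {1: [2, 3], 2: [1, 3], 3: [1, 2]}  # a triangle: all degrees even
print(check_circuit_or_path(example_graph, 4))  # (1, -1)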
from ....utils import logging
A : List[str] = logging.get_logger(__name__)
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Any=None , __magic_name__ : List[str]=2_048 ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = config.__dict__
SCREAMING_SNAKE_CASE_ = modal_hidden_size
if num_labels:
SCREAMING_SNAKE_CASE_ = num_labels
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Union[str, Any] = logging.get_logger(__name__)
A : Dict = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''informer'''
lowerCamelCase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self : str , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[int] = None , __magic_name__ : str = "student_t" , __magic_name__ : str = "nll" , __magic_name__ : int = 1 , __magic_name__ : List[int] = None , __magic_name__ : Optional[Union[str, bool]] = "mean" , __magic_name__ : int = 0 , __magic_name__ : int = 0 , __magic_name__ : int = 0 , __magic_name__ : int = 0 , __magic_name__ : Optional[List[int]] = None , __magic_name__ : Optional[List[int]] = None , __magic_name__ : int = 64 , __magic_name__ : int = 32 , __magic_name__ : int = 32 , __magic_name__ : int = 2 , __magic_name__ : int = 2 , __magic_name__ : int = 2 , __magic_name__ : int = 2 , __magic_name__ : bool = True , __magic_name__ : str = "gelu" , __magic_name__ : float = 0.05 , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : int = 100 , __magic_name__ : float = 0.02 , __magic_name__ : Optional[int]=True , __magic_name__ : str = "prob" , __magic_name__ : int = 5 , __magic_name__ : bool = True , **__magic_name__ : int , ) -> Any:
# time series specific configuration
SCREAMING_SNAKE_CASE_ = prediction_length
SCREAMING_SNAKE_CASE_ = context_length or prediction_length
SCREAMING_SNAKE_CASE_ = distribution_output
SCREAMING_SNAKE_CASE_ = loss
SCREAMING_SNAKE_CASE_ = input_size
SCREAMING_SNAKE_CASE_ = num_time_features
SCREAMING_SNAKE_CASE_ = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
SCREAMING_SNAKE_CASE_ = scaling
SCREAMING_SNAKE_CASE_ = num_dynamic_real_features
SCREAMING_SNAKE_CASE_ = num_static_real_features
SCREAMING_SNAKE_CASE_ = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(__magic_name__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
SCREAMING_SNAKE_CASE_ = cardinality
else:
SCREAMING_SNAKE_CASE_ = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(__magic_name__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
SCREAMING_SNAKE_CASE_ = embedding_dimension
else:
SCREAMING_SNAKE_CASE_ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
SCREAMING_SNAKE_CASE_ = num_parallel_samples
# Transformer architecture configuration
SCREAMING_SNAKE_CASE_ = input_size * len(self.lags_sequence ) + self._number_of_features
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = encoder_attention_heads
SCREAMING_SNAKE_CASE_ = decoder_attention_heads
SCREAMING_SNAKE_CASE_ = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = encoder_layerdrop
SCREAMING_SNAKE_CASE_ = decoder_layerdrop
SCREAMING_SNAKE_CASE_ = activation_function
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = use_cache
# Informer
SCREAMING_SNAKE_CASE_ = attention_type
SCREAMING_SNAKE_CASE_ = sampling_factor
SCREAMING_SNAKE_CASE_ = distil
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def __A ( self : List[Any] ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
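# Construction sketch (InformerConfig is the public name this config carries
# in transformers):
from transformers import InformerConfig
config = InformerConfig(prediction_length=24, context_length=48)
print(config.d_model)  # 64, the default declared above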
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = ['''image_processor''', '''tokenizer''']
lowerCamelCase__ = '''ViltImageProcessor'''
lowerCamelCase__ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : Optional[int] , __magic_name__ : str=None , __magic_name__ : List[str]=None , **__magic_name__ : Any ) -> str:
SCREAMING_SNAKE_CASE_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __magic_name__ , )
SCREAMING_SNAKE_CASE_ = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = self.image_processor
def __call__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __magic_name__ : bool = True , __magic_name__ : Union[bool, str, PaddingStrategy] = False , __magic_name__ : Union[bool, str, TruncationStrategy] = None , __magic_name__ : Optional[int] = None , __magic_name__ : int = 0 , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = True , __magic_name__ : Optional[Union[str, TensorType]] = None , **__magic_name__ : str , ) -> BatchEncoding:
SCREAMING_SNAKE_CASE_ = self.tokenizer(
text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_token_type_ids=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
# add pixel_values + pixel_mask
SCREAMING_SNAKE_CASE_ = self.image_processor(__magic_name__ , return_tensors=__magic_name__ )
encoding.update(__magic_name__ )
return encoding
def __A ( self : Optional[int] , *__magic_name__ : List[Any] , **__magic_name__ : Optional[Any] ) -> Any:
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def __A ( self : Dict , *__magic_name__ : List[Any] , **__magic_name__ : Union[str, Any] ) -> str:
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def __A ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __A ( self : Dict ) -> List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __magic_name__ , )
return self.image_processor_class
@property
def __A ( self : int ) -> List[Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __magic_name__ , )
return self.image_processor
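# Usage sketch, assuming the dandelin/vilt-b32-mlm checkpoint on the Hub:
#     from transformers import ViltProcessor
#     processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-mlm")
#     inputs = processor(images=image, text="a photo of a cat", return_tensors="pt")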
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
A : str = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCamelCase (datasets.BuilderConfig ):
"""simple docstring"""
lowerCamelCase__ = 1_0_0_0_0
lowerCamelCase__ = None
lowerCamelCase__ = None
class lowerCamelCase (datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCamelCase__ = ParquetConfig
def __A ( self : int ) -> Optional[Any]:
return datasets.DatasetInfo(features=self.config.features )
def __A ( self : Tuple , __magic_name__ : List[str] ) -> Union[str, Any]:
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
SCREAMING_SNAKE_CASE_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__magic_name__ , (str, list, tuple) ):
SCREAMING_SNAKE_CASE_ = data_files
if isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(__magic_name__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
SCREAMING_SNAKE_CASE_ = []
for split_name, files in data_files.items():
if isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(__magic_name__ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(__magic_name__ ):
with open(__magic_name__ , "rb" ) as f:
SCREAMING_SNAKE_CASE_ = datasets.Features.from_arrow_schema(pq.read_schema(__magic_name__ ) )
break
splits.append(datasets.SplitGenerator(name=__magic_name__ , gen_kwargs={"files": files} ) )
return splits
def __A ( self : Tuple , __magic_name__ : pa.Table ) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
SCREAMING_SNAKE_CASE_ = table_cast(__magic_name__ , self.info.features.arrow_schema )
return pa_table
def __A ( self : List[str] , __magic_name__ : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
for file_idx, file in enumerate(itertools.chain.from_iterable(__magic_name__ ) ):
with open(__magic_name__ , "rb" ) as f:
SCREAMING_SNAKE_CASE_ = pq.ParquetFile(__magic_name__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
SCREAMING_SNAKE_CASE_ = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F'''{file_idx}_{batch_idx}''', self._cast_table(__magic_name__ )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}''' )
raise
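# Standalone sketch of the per-batch read pattern used in _generate_tables,
# assuming a local file "data.parquet" (hypothetical path):
#     import pyarrow.parquet as pq
#     for batch in pq.ParquetFile("data.parquet").iter_batches(batch_size=10_000):
#         table = pa.Table.from_batches([batch])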
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A : str = logging.get_logger(__name__)
A : Optional[int] = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''table-transformer'''
lowerCamelCase__ = ['''past_key_values''']
lowerCamelCase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : List[Any] , __magic_name__ : Optional[Any]=True , __magic_name__ : Dict=None , __magic_name__ : Any=3 , __magic_name__ : List[str]=100 , __magic_name__ : Union[str, Any]=6 , __magic_name__ : Dict=2_048 , __magic_name__ : str=8 , __magic_name__ : int=6 , __magic_name__ : List[Any]=2_048 , __magic_name__ : Optional[int]=8 , __magic_name__ : Optional[int]=0.0 , __magic_name__ : List[Any]=0.0 , __magic_name__ : Optional[Any]=True , __magic_name__ : List[Any]="relu" , __magic_name__ : List[str]=256 , __magic_name__ : List[str]=0.1 , __magic_name__ : int=0.0 , __magic_name__ : Optional[Any]=0.0 , __magic_name__ : Tuple=0.02 , __magic_name__ : str=1.0 , __magic_name__ : int=False , __magic_name__ : Dict="sine" , __magic_name__ : Union[str, Any]="resnet50" , __magic_name__ : Optional[Any]=True , __magic_name__ : str=False , __magic_name__ : List[str]=1 , __magic_name__ : int=5 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Tuple=1 , __magic_name__ : Optional[int]=1 , __magic_name__ : Optional[Any]=5 , __magic_name__ : Optional[int]=2 , __magic_name__ : Union[str, Any]=0.1 , **__magic_name__ : Tuple , ) -> str:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = backbone_config.get("model_type" )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(__magic_name__ )
# set timm attributes to None
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None, None, None
SCREAMING_SNAKE_CASE_ = use_timm_backbone
SCREAMING_SNAKE_CASE_ = backbone_config
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = num_queries
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = encoder_attention_heads
SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = decoder_attention_heads
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = activation_function
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = init_xavier_std
SCREAMING_SNAKE_CASE_ = encoder_layerdrop
SCREAMING_SNAKE_CASE_ = decoder_layerdrop
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = auxiliary_loss
SCREAMING_SNAKE_CASE_ = position_embedding_type
SCREAMING_SNAKE_CASE_ = backbone
SCREAMING_SNAKE_CASE_ = use_pretrained_backbone
SCREAMING_SNAKE_CASE_ = dilation
# Hungarian matcher
SCREAMING_SNAKE_CASE_ = class_cost
SCREAMING_SNAKE_CASE_ = bbox_cost
SCREAMING_SNAKE_CASE_ = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE_ = mask_loss_coefficient
SCREAMING_SNAKE_CASE_ = dice_loss_coefficient
SCREAMING_SNAKE_CASE_ = bbox_loss_coefficient
SCREAMING_SNAKE_CASE_ = giou_loss_coefficient
SCREAMING_SNAKE_CASE_ = eos_coefficient
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def __A ( self : Union[str, Any] ) -> int:
return self.encoder_attention_heads
@property
def __A ( self : Any ) -> int:
return self.d_model
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __A ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def __A ( self : Any ) -> float:
return 1e-5
@property
def __A ( self : int ) -> int:
return 12
import math
A : Any = 10
A : List[Any] = 7
A : int = BALLS_PER_COLOUR * NUM_COLOURS
def a__ ( __UpperCamelCase = 2_0 ):
    SCREAMING_SNAKE_CASE_ = math.comb(NUM_BALLS , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = NUM_COLOURS * (1 - missing_colour / total)
return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
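# Cross-check of the expectation above: a given colour is entirely absent
# from the 20-ball draw with probability C(60, 20) / C(70, 20).
from math import comb
print(7 * (1 - comb(60, 20) / comb(70, 20)))  # ~6.818741802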
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
# Initialise PyTorch model
SCREAMING_SNAKE_CASE_ = AlbertConfig.from_json_file(__UpperCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
SCREAMING_SNAKE_CASE_ = AlbertForPreTraining(__UpperCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_albert(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __UpperCamelCase )
if __name__ == "__main__":
A : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
from __future__ import annotations
import numpy as np
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = np.shape(__UpperCamelCase )
if rows != columns:
SCREAMING_SNAKE_CASE_ = (
"'table' has to be of square shaped array but got a "
F'''{rows}x{columns} array:\n{table}'''
)
raise ValueError(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = np.zeros((rows, columns) )
SCREAMING_SNAKE_CASE_ = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            SCREAMING_SNAKE_CASE_ = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists" )
            SCREAMING_SNAKE_CASE_ = (table[i][j] - total) / upper[j][j]
        SCREAMING_SNAKE_CASE_ = 1
        for j in range(i , columns ):
            SCREAMING_SNAKE_CASE_ = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            SCREAMING_SNAKE_CASE_ = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
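# Usage sketch with a hypothetical 2x2 matrix, assuming the function above is
# the Doolittle decomposition lower_upper_decomposition(table):
#     lower, upper = lower_upper_decomposition(np.array([[4.0, 3.0], [6.0, 3.0]]))
#     # lower = [[1, 0], [1.5, 1]], upper = [[4, 3], [0, -1.5]], lower @ upper == table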
def a__ ( __UpperCamelCase ):
if n_term == "":
return []
SCREAMING_SNAKE_CASE_ = []
for temp in range(int(__UpperCamelCase ) ):
series.append(F'''1/{temp + 1}''' if series else "1" )
return series
if __name__ == "__main__":
A : Optional[int] = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
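# Quick check (harmonic_series is the name the __main__ block above uses; the
# argument arrives as a string from input()):
assert harmonic_series("3") == ["1", "1/2", "1/3"]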
from math import pi, sqrt, tan
def a__ ( __UpperCamelCase ):
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values" )
return 6 * side_length**2
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def a__ ( __UpperCamelCase ):
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values" )
return 4 * pi * radius**2
def a__ ( __UpperCamelCase ):
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values" )
return 3 * pi * radius**2
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values" )
    SCREAMING_SNAKE_CASE_ = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values" )
return 2 * pi * radius * (height + radius)
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError("surface_area_torus() only accepts non-negative values" )
if torus_radius < tube_radius:
raise ValueError(
"surface_area_torus() does not support spindle or self intersecting tori" )
return 4 * pow(__UpperCamelCase , 2 ) * torus_radius * tube_radius
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values" )
return length * width
def a__ ( __UpperCamelCase ):
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values" )
return side_length**2
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values" )
return (base * height) / 2
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values" )
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle" )
    SCREAMING_SNAKE_CASE_ = (side1 + side2 + side3) / 2
    SCREAMING_SNAKE_CASE_ = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3) )
return area
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values" )
return base * height
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values" )
    return 1 / 2 * (base1 + base2) * height
def a__ ( __UpperCamelCase ):
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values" )
return pi * radius**2
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values" )
return pi * radius_x * radius_y
def a__ ( __UpperCamelCase , __UpperCamelCase ):
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values" )
    return 1 / 2 * diagonal_1 * diagonal_2
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or sides < 3:
raise ValueError(
"area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides" )
elif length < 0:
raise ValueError(
"area_reg_polygon() only accepts non-negative values as \
length of a side" )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class lowerCamelCase :
"""simple docstring"""
lowerCamelCase__ = 42
lowerCamelCase__ = None
lowerCamelCase__ = None
A : Any = namedtuple("CoinsDistribResult", "moves excess")
def a__ ( __UpperCamelCase ):
if root is None:
return 0
# Validation
def count_nodes(__UpperCamelCase ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(__UpperCamelCase ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(__UpperCamelCase ) != count_coins(__UpperCamelCase ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(__UpperCamelCase ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = get_distrib(node.left )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = get_distrib(node.right )
SCREAMING_SNAKE_CASE_ = 1 - left_distrib_excess
SCREAMING_SNAKE_CASE_ = 1 - right_distrib_excess
        SCREAMING_SNAKE_CASE_ = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
SCREAMING_SNAKE_CASE_ = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves , distrib_excess )
return get_distrib(__UpperCamelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
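# Usage sketch with hypothetical names (the original class and solver names
# are not shown here): a root holding 3 coins with two empty children needs
# 2 moves, one coin pushed down each side:
#     root = TreeNode(3, TreeNode(0), TreeNode(0))
#     get_coin_distribution(root) == 2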
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
A : List[str] = logging.get_logger(__name__)
A : int = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''blenderbot-small'''
lowerCamelCase__ = ['''past_key_values''']
lowerCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Dict , __magic_name__ : Dict=50_265 , __magic_name__ : str=512 , __magic_name__ : List[Any]=8 , __magic_name__ : Any=2_048 , __magic_name__ : Dict=16 , __magic_name__ : Any=8 , __magic_name__ : Optional[int]=2_048 , __magic_name__ : Dict=16 , __magic_name__ : Tuple=0.0 , __magic_name__ : Dict=0.0 , __magic_name__ : Optional[int]=True , __magic_name__ : Any=True , __magic_name__ : Dict="gelu" , __magic_name__ : Tuple=512 , __magic_name__ : List[str]=0.1 , __magic_name__ : List[Any]=0.0 , __magic_name__ : List[Any]=0.0 , __magic_name__ : Tuple=0.02 , __magic_name__ : Union[str, Any]=1 , __magic_name__ : List[Any]=False , __magic_name__ : str=0 , __magic_name__ : Dict=1 , __magic_name__ : str=2 , __magic_name__ : Union[str, Any]=2 , **__magic_name__ : Optional[Any] , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = encoder_attention_heads
SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = decoder_attention_heads
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = activation_function
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = encoder_layerdrop
SCREAMING_SNAKE_CASE_ = decoder_layerdrop
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , is_encoder_decoder=__magic_name__ , decoder_start_token_id=__magic_name__ , forced_eos_token_id=__magic_name__ , **__magic_name__ , )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def __A ( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE_ = {0: "batch"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "decoder_sequence"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
for i in range(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def __A ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = super().outputs
else:
SCREAMING_SNAKE_CASE_ = super(__magic_name__ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
for i in range(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def __A ( self : int , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE_ = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE_ = dict(**__magic_name__ , **__magic_name__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = common_inputs["input_ids"].shape
SCREAMING_SNAKE_CASE_ = common_inputs["decoder_input_ids"].shape[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_attention_heads
SCREAMING_SNAKE_CASE_ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE_ = decoder_seq_length + 3
SCREAMING_SNAKE_CASE_ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE_ = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__magic_name__ , __magic_name__ )] , dim=1 )
SCREAMING_SNAKE_CASE_ = []
            # the encoder and decoder may have different numbers of layers; fill past values for the
            # shared layers first, then for whichever side has more layers
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
SCREAMING_SNAKE_CASE_ = min(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = max(__magic_name__ , __magic_name__ ) - min_num_layers
SCREAMING_SNAKE_CASE_ = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__magic_name__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE_ = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__magic_name__ , __magic_name__ ):
common_inputs["past_key_values"].append((torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) )
return common_inputs
def __A ( self : Union[str, Any] , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE_ = seqlen + 2
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_attention_heads
SCREAMING_SNAKE_CASE_ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE_ = common_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE_ = torch.cat(
[common_inputs["attention_mask"], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
SCREAMING_SNAKE_CASE_ = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(__magic_name__ )
]
return common_inputs
def __A ( self : Dict , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ = tokenizer.num_special_tokens_to_add(__magic_name__ )
SCREAMING_SNAKE_CASE_ = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__magic_name__ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE_ = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE_ = dict(tokenizer(__magic_name__ , return_tensors=__magic_name__ ) )
return common_inputs
def __A ( self : Optional[Any] , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_causal_lm(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
else:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
return common_inputs
def __A ( self : Optional[Any] , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : List[str] ) -> List[str]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = super()._flatten_past_key_values_(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
else:
SCREAMING_SNAKE_CASE_ = super(__magic_name__ , self )._flatten_past_key_values_(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
| 305
| 1
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
A : str = logging.get_logger(__name__)
class lowerCamelCase (enum.Enum ):
"""simple docstring"""
lowerCamelCase__ = 0
lowerCamelCase__ = 1
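    # the two members correspond to returning raw tensors (0) and decoded text (1); they are
    # referenced as ReturnType.TENSORS and ReturnType.TEXT below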
@add_end_docstrings(SCREAMING_SNAKE_CASE__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''generated'''
def __init__( self : int , *__magic_name__ : Optional[Any] , **__magic_name__ : Any ) -> int:
super().__init__(*__magic_name__ , **__magic_name__ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __A ( self : str , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Tuple=None , __magic_name__ : List[str]=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Optional[int]=None , **__magic_name__ : Union[str, Any] , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = {}
if truncation is not None:
SCREAMING_SNAKE_CASE_ = truncation
SCREAMING_SNAKE_CASE_ = generate_kwargs
SCREAMING_SNAKE_CASE_ = {}
if return_tensors is not None and return_type is None:
SCREAMING_SNAKE_CASE_ = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
SCREAMING_SNAKE_CASE_ = return_type
if clean_up_tokenization_spaces is not None:
SCREAMING_SNAKE_CASE_ = clean_up_tokenization_spaces
if stop_sequence is not None:
SCREAMING_SNAKE_CASE_ = self.tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
if len(__magic_name__ ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
SCREAMING_SNAKE_CASE_ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __A ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> Union[str, Any]:
return True
def __A ( self : List[Any] , *__magic_name__ : str , __magic_name__ : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0] , __magic_name__ ):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" )
SCREAMING_SNAKE_CASE_ = ([prefix + arg for arg in args[0]],)
SCREAMING_SNAKE_CASE_ = True
elif isinstance(args[0] , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = (prefix + args[0],)
SCREAMING_SNAKE_CASE_ = False
else:
raise ValueError(
                F''' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or of type `list`''' )
SCREAMING_SNAKE_CASE_ = self.tokenizer(*__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : List[Any] , *__magic_name__ : Optional[int] , **__magic_name__ : str ) -> Tuple:
SCREAMING_SNAKE_CASE_ = super().__call__(*__magic_name__ , **__magic_name__ )
if (
isinstance(args[0] , __magic_name__ )
and all(isinstance(__magic_name__ , __magic_name__ ) for el in args[0] )
and all(len(__magic_name__ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __A ( self : List[Any] , __magic_name__ : List[Any] , __magic_name__ : str=TruncationStrategy.DO_NOT_TRUNCATE , **__magic_name__ : Optional[Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = self._parse_and_tokenize(__magic_name__ , truncation=__magic_name__ , **__magic_name__ )
return inputs
def __A ( self : Any , __magic_name__ : Optional[int] , **__magic_name__ : List[str] ) -> int:
if self.framework == "pt":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = model_inputs["input_ids"].shape
elif self.framework == "tf":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = tf.shape(model_inputs["input_ids"] ).numpy()
SCREAMING_SNAKE_CASE_ = generate_kwargs.get("min_length" , self.model.config.min_length )
SCREAMING_SNAKE_CASE_ = generate_kwargs.get("max_length" , self.model.config.max_length )
self.check_inputs(__magic_name__ , generate_kwargs["min_length"] , generate_kwargs["max_length"] )
SCREAMING_SNAKE_CASE_ = self.model.generate(**__magic_name__ , **__magic_name__ )
SCREAMING_SNAKE_CASE_ = output_ids.shape[0]
if self.framework == "pt":
SCREAMING_SNAKE_CASE_ = output_ids.reshape(__magic_name__ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
SCREAMING_SNAKE_CASE_ = tf.reshape(__magic_name__ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __A ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : List[str]=ReturnType.TEXT , __magic_name__ : Optional[int]=False ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
SCREAMING_SNAKE_CASE_ = {F'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
SCREAMING_SNAKE_CASE_ = {
F'''{self.return_name}_text''': self.tokenizer.decode(
__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ , )
}
records.append(__magic_name__ )
return records
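# A minimal usage sketch for the pipeline above (hedged: `pipeline` and the task name are the
# standard transformers entry points, not defined in this file; the input text is illustrative):
#   from transformers import pipeline
#   generator = pipeline("text2text-generation")
#   generator("translate English to French: Hello")  # -> [{"generated_text": "..."}]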
@add_end_docstrings(SCREAMING_SNAKE_CASE__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''summary'''
def __call__( self : str , *__magic_name__ : List[str] , **__magic_name__ : int ) -> Tuple:
return super().__call__(*__magic_name__ , **__magic_name__ )
def __A ( self : Any , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> bool:
if max_length < min_length:
            logger.warning(F'''Your min_length={min_length} must be less than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
"a summarization task, where outputs shorter than the input are typically wanted, you might "
F'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(SCREAMING_SNAKE_CASE__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''translation'''
def __A ( self : Tuple , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> str:
if input_length > 0.9 * max_length:
logger.warning(
F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
"increasing your max_length manually, e.g. translator('...', max_length=400)" )
return True
def __A ( self : List[str] , *__magic_name__ : Tuple , __magic_name__ : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , __magic_name__ : Union[str, Any]=None , __magic_name__ : Optional[int]=None ) -> Tuple:
if getattr(self.tokenizer , "_build_translation_inputs" , __magic_name__ ):
return self.tokenizer._build_translation_inputs(
*__magic_name__ , return_tensors=self.framework , truncation=__magic_name__ , src_lang=__magic_name__ , tgt_lang=__magic_name__ )
else:
return super()._parse_and_tokenize(*__magic_name__ , truncation=__magic_name__ )
def __A ( self : Dict , __magic_name__ : Union[str, Any]=None , __magic_name__ : Optional[Any]=None , **__magic_name__ : str ) -> List[Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = super()._sanitize_parameters(**__magic_name__ )
if src_lang is not None:
SCREAMING_SNAKE_CASE_ = src_lang
if tgt_lang is not None:
SCREAMING_SNAKE_CASE_ = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
SCREAMING_SNAKE_CASE_ = kwargs.get("task" , self.task )
SCREAMING_SNAKE_CASE_ = task.split("_" )
if task and len(__magic_name__ ) == 4:
# translation, XX, to YY
SCREAMING_SNAKE_CASE_ = items[1]
SCREAMING_SNAKE_CASE_ = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[int] , *__magic_name__ : int , **__magic_name__ : Optional[int] ) -> Dict:
return super().__call__(*__magic_name__ , **__magic_name__ )
| 305
|
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowerCamelCase :
"""simple docstring"""
def __init__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : int=100 , __magic_name__ : Optional[Any]=13 , __magic_name__ : Dict=30 , __magic_name__ : Tuple=2 , __magic_name__ : str=3 , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : Union[str, Any]=32 , __magic_name__ : Optional[int]=4 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Any="gelu" , __magic_name__ : int=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Optional[int]=10 , __magic_name__ : Tuple=0.02 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=None , __magic_name__ : Tuple=[0, 1, 2, 3] , ) -> List[str]:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = 100
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = out_indices
SCREAMING_SNAKE_CASE_ = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_ = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_ = num_patches + 1
def __A ( self : Any ) -> int:
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def __A ( self : Dict ) -> Optional[int]:
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __A ( self : Optional[int] , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = BeitModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : str ) -> int:
SCREAMING_SNAKE_CASE_ = BeitForMaskedImageModeling(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __A ( self : Dict , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_ = BeitForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = BeitForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self : Tuple , __magic_name__ : Any , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : int ) -> int:
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __A ( self : str ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __A ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = BeitModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def __A ( self : Dict ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def __A ( self : List[str] ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __A ( self : str ) -> List[str]:
pass
def __A ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def __A ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def __A ( self : Tuple ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __A ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def __A ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
def __A ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__magic_name__ )
def __A ( self : int ) -> Optional[int]:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__magic_name__ ), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ ).loss
loss.backward()
def __A ( self : Any ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__magic_name__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.gradient_checkpointing_enable()
model.to(__magic_name__ )
model.train()
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ ).loss
loss.backward()
def __A ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = _config_zero_init(__magic_name__ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(config=__magic_name__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def __A ( self : int ) -> Optional[int]:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = BeitModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def a__ ( ):
SCREAMING_SNAKE_CASE_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self : List[Any] ) -> str:
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def __A ( self : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).pixel_values.to(__magic_name__ )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE_ = torch.ones((1, 196) , dtype=torch.bool ).to(__magic_name__ )
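        # a 224x224 image with 16x16 patches gives (224 // 16) ** 2 = 196 patches; all are masked here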
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(pixel_values=__magic_name__ , bool_masked_pos=__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __magic_name__ , atol=1e-2 ) )
@slow
def __A ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE_ = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[0, :3] , __magic_name__ , atol=1e-4 ) )
SCREAMING_SNAKE_CASE_ = 281
self.assertEqual(logits.argmax(-1 ).item() , __magic_name__ )
@slow
def __A ( self : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor([1.6881, -0.2787, 0.5901] ).to(__magic_name__ )
self.assertTrue(torch.allclose(logits[0, :3] , __magic_name__ , atol=1e-4 ) )
SCREAMING_SNAKE_CASE_ = 2_396
self.assertEqual(logits.argmax(-1 ).item() , __magic_name__ )
@slow
def __A ( self : Tuple ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
SCREAMING_SNAKE_CASE_ = model.to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = BeitImageProcessor(do_resize=__magic_name__ , size=640 , do_center_crop=__magic_name__ )
SCREAMING_SNAKE_CASE_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
SCREAMING_SNAKE_CASE_ = Image.open(ds[0]["file"] )
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=__magic_name__ , )
else:
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=__magic_name__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __magic_name__ , atol=1e-4 ) )
@slow
def __A ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
SCREAMING_SNAKE_CASE_ = model.to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = BeitImageProcessor(do_resize=__magic_name__ , size=640 , do_center_crop=__magic_name__ )
SCREAMING_SNAKE_CASE_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
SCREAMING_SNAKE_CASE_ = Image.open(ds[0]["file"] )
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE_ = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ , target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE_ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __magic_name__ )
| 305
| 1
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
A : Union[str, Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = state_dict.pop(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = val
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
SCREAMING_SNAKE_CASE_ = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
SCREAMING_SNAKE_CASE_ = value
else:
SCREAMING_SNAKE_CASE_ = value
return new_state_dict
def a__ ( __UpperCamelCase , __UpperCamelCase=False ):
SCREAMING_SNAKE_CASE_ = ""
if is_panoptic:
SCREAMING_SNAKE_CASE_ = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
SCREAMING_SNAKE_CASE_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
SCREAMING_SNAKE_CASE_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
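        # in_proj_weight stacks the query, key and value projections as (3 * hidden_size, hidden_size);
        # hidden_size is 256 for Conditional DETR, hence the [:256], [256:512] and [-256:] row slices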
SCREAMING_SNAKE_CASE_ = in_proj_weight[:2_5_6, :]
SCREAMING_SNAKE_CASE_ = in_proj_bias[:2_5_6]
SCREAMING_SNAKE_CASE_ = in_proj_weight[2_5_6:5_1_2, :]
SCREAMING_SNAKE_CASE_ = in_proj_bias[2_5_6:5_1_2]
SCREAMING_SNAKE_CASE_ = in_proj_weight[-2_5_6:, :]
SCREAMING_SNAKE_CASE_ = in_proj_bias[-2_5_6:]
def a__ ( ):
SCREAMING_SNAKE_CASE_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def a__ ( __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
SCREAMING_SNAKE_CASE_ = "resnet101"
if "dc5" in model_name:
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = "panoptic" in model_name
if is_panoptic:
SCREAMING_SNAKE_CASE_ = 2_5_0
else:
SCREAMING_SNAKE_CASE_ = 9_1
SCREAMING_SNAKE_CASE_ = "huggingface/label-files"
SCREAMING_SNAKE_CASE_ = "coco-detection-id2label.json"
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = idalabel
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
# load image processor
SCREAMING_SNAKE_CASE_ = "coco_panoptic" if is_panoptic else "coco_detection"
SCREAMING_SNAKE_CASE_ = ConditionalDetrImageProcessor(format=__UpperCamelCase )
# prepare image
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__UpperCamelCase , return_tensors="pt" )
SCREAMING_SNAKE_CASE_ = encoding["pixel_values"]
logger.info(F'''Converting model {model_name}...''' )
# load original model from torch hub
SCREAMING_SNAKE_CASE_ = torch.hub.load("DeppMeng/ConditionalDETR" , __UpperCamelCase , pretrained=__UpperCamelCase ).eval()
SCREAMING_SNAKE_CASE_ = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
SCREAMING_SNAKE_CASE_ = "conditional_detr." + src
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = rename_backbone_keys(__UpperCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(__UpperCamelCase , is_panoptic=__UpperCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE_ = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
SCREAMING_SNAKE_CASE_ = state_dict.pop(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
SCREAMING_SNAKE_CASE_ = state_dict.pop(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
SCREAMING_SNAKE_CASE_ = state_dict.pop(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
SCREAMING_SNAKE_CASE_ = state_dict.pop(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = val
# finally, create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE_ = ConditionalDetrForSegmentation(__UpperCamelCase ) if is_panoptic else ConditionalDetrForObjectDetection(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
model.push_to_hub(repo_id=__UpperCamelCase , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
SCREAMING_SNAKE_CASE_ = conditional_detr(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = model(__UpperCamelCase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
A : List[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 305
|
from __future__ import annotations
A : Dict = "#"
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Dict ) -> None:
SCREAMING_SNAKE_CASE_ = {}
def __A ( self : List[Any] , __magic_name__ : str ) -> None:
SCREAMING_SNAKE_CASE_ = self._trie
for char in text:
if char not in trie:
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = trie[char]
SCREAMING_SNAKE_CASE_ = True
def __A ( self : Union[str, Any] , __magic_name__ : str ) -> tuple | list:
SCREAMING_SNAKE_CASE_ = self._trie
for char in prefix:
if char in trie:
SCREAMING_SNAKE_CASE_ = trie[char]
else:
return []
return self._elements(__magic_name__ )
def __A ( self : int , __magic_name__ : dict ) -> tuple:
SCREAMING_SNAKE_CASE_ = []
for c, v in d.items():
SCREAMING_SNAKE_CASE_ = [" "] if c == END else [(c + s) for s in self._elements(__magic_name__ )]
result.extend(__magic_name__ )
return tuple(__magic_name__ )
A : Union[str, Any] = Trie()
A : Optional[int] = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = trie.find_word(__UpperCamelCase )
return tuple(string + word for word in suffixes )
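# With the word list above, autocomplete_using_trie("de") should return a tuple like
# ('depart ', 'detergent ', 'deer ', 'deal ') -- the trailing space marks a completed word.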
def a__ ( ):
print(autocomplete_using_trie("de" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 305
| 1
|
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = (DDPMParallelScheduler,)
def __A ( self : Dict , **__magic_name__ : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE_ = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**__magic_name__ )
return config
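    # each check_over_configs call below re-instantiates the scheduler with one field overridden and,
    # via the common test helper, verifies that a config save/load round trip leaves the step output unchanged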
def __A ( self : Any ) -> Optional[Any]:
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__magic_name__ )
def __A ( self : Optional[int] ) -> str:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__magic_name__ , beta_end=__magic_name__ )
def __A ( self : Dict ) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__magic_name__ )
def __A ( self : Dict ) -> Any:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__magic_name__ )
def __A ( self : str ) -> Optional[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__magic_name__ )
def __A ( self : Dict ) -> List[Any]:
self.check_over_configs(thresholding=__magic_name__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__magic_name__ , prediction_type=__magic_name__ , sample_max_value=__magic_name__ , )
def __A ( self : Optional[Any] ) -> Any:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__magic_name__ )
def __A ( self : Any ) -> Tuple:
for t in [0, 500, 999]:
self.check_over_forward(time_step=__magic_name__ )
def __A ( self : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**__magic_name__ )
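        # sanity-check the DDPM posterior variance beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t:
        # it is ~0 at t=0 and approaches beta_end (0.02) at the last timestep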
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def __A ( self : str ) -> Any:
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = len(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter + 0.1
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter - 0.1
SCREAMING_SNAKE_CASE_ = samplea.shape[0]
SCREAMING_SNAKE_CASE_ = torch.stack([samplea, samplea, samplea] , dim=0 )
SCREAMING_SNAKE_CASE_ = torch.arange(__magic_name__ )[0:3, None].repeat(1 , __magic_name__ )
SCREAMING_SNAKE_CASE_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE_ = scheduler.batch_step_no_noise(__magic_name__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(__magic_name__ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(__magic_name__ ) )
assert abs(result_sum.item() - 1153.1833 ) < 1e-2
assert abs(result_mean.item() - 0.5005 ) < 1e-3
def __A ( self : int ) -> Any:
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = len(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
for t in reversed(range(__magic_name__ ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , __magic_name__ )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE_ = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , generator=__magic_name__ ).prev_sample
SCREAMING_SNAKE_CASE_ = pred_prev_sample
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(__magic_name__ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(__magic_name__ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def __A ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(prediction_type="v_prediction" )
SCREAMING_SNAKE_CASE_ = scheduler_class(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = len(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
for t in reversed(range(__magic_name__ ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , __magic_name__ )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE_ = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , generator=__magic_name__ ).prev_sample
SCREAMING_SNAKE_CASE_ = pred_prev_sample
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(__magic_name__ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(__magic_name__ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def __A ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__magic_name__ )
SCREAMING_SNAKE_CASE_ = scheduler.timesteps
for i, timestep in enumerate(__magic_name__ ):
if i == len(__magic_name__ ) - 1:
SCREAMING_SNAKE_CASE_ = -1
else:
SCREAMING_SNAKE_CASE_ = timesteps[i + 1]
SCREAMING_SNAKE_CASE_ = scheduler.previous_timestep(__magic_name__ )
SCREAMING_SNAKE_CASE_ = prev_t.item()
self.assertEqual(__magic_name__ , __magic_name__ )
def __A ( self : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = [100, 87, 50, 51, 0]
with self.assertRaises(__magic_name__ , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=__magic_name__ )
def __A ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = [100, 87, 50, 1, 0]
SCREAMING_SNAKE_CASE_ = len(__magic_name__ )
with self.assertRaises(__magic_name__ , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=__magic_name__ , timesteps=__magic_name__ )
def __A ( self : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__magic_name__ , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
scheduler.set_timesteps(timesteps=__magic_name__ )
| 305
|
from collections import deque
class lowerCamelCase :
"""simple docstring"""
def __init__( self : str , __magic_name__ : str , __magic_name__ : int , __magic_name__ : int ) -> None:
SCREAMING_SNAKE_CASE_ = process_name # process name
SCREAMING_SNAKE_CASE_ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
SCREAMING_SNAKE_CASE_ = arrival_time
SCREAMING_SNAKE_CASE_ = burst_time # remaining burst time
SCREAMING_SNAKE_CASE_ = 0 # total time of the process wait in ready queue
SCREAMING_SNAKE_CASE_ = 0 # time from arrival time to completion time
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Tuple , __magic_name__ : int , __magic_name__ : list[int] , __magic_name__ : deque[Process] , __magic_name__ : int , ) -> None:
        # total number of queues in the MLFQ
SCREAMING_SNAKE_CASE_ = number_of_queues
# time slice of queues that round robin algorithm applied
SCREAMING_SNAKE_CASE_ = time_slices
# unfinished process is in this ready_queue
SCREAMING_SNAKE_CASE_ = queue
# current time
SCREAMING_SNAKE_CASE_ = current_time
# finished process is in this sequence queue
SCREAMING_SNAKE_CASE_ = deque()
def __A ( self : Dict ) -> list[str]:
SCREAMING_SNAKE_CASE_ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def __A ( self : List[str] , __magic_name__ : list[Process] ) -> list[int]:
SCREAMING_SNAKE_CASE_ = []
for i in range(len(__magic_name__ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def __A ( self : List[str] , __magic_name__ : list[Process] ) -> list[int]:
SCREAMING_SNAKE_CASE_ = []
for i in range(len(__magic_name__ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def __A ( self : Tuple , __magic_name__ : list[Process] ) -> list[int]:
SCREAMING_SNAKE_CASE_ = []
for i in range(len(__magic_name__ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def __A ( self : str , __magic_name__ : deque[Process] ) -> list[int]:
return [q.burst_time for q in queue]
def __A ( self : Optional[Any] , __magic_name__ : Process ) -> int:
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished processes
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if the process's arrival time is later than the current time, jump to it
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time to 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to the finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished processes to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated processes
        # just for 1 cycle; unfinished processes go back to the queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if the process's arrival time is later than the current time, jump to it
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of the process is bigger than the time slice
            if cp.burst_time > time_slice:
                # use CPU for only one time slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # put the process at the back of the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time to 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to the finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished processes to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue uses the first come, first served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
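
# Minimal hand-checkable sketch (added for illustration; the process names and
# numbers are made up). With one round-robin queue (time slice 2) plus a final
# FCFS queue, A runs 0-2, B runs 2-4, then FCFS runs A 4-7 and B 7-8, so the
# waiting times are [2, 5] and the turnaround times are [7, 8].
if __name__ == "__main__":
    demo_a = Process("A", 0, 5)
    demo_b = Process("B", 0, 3)
    demo = MLFQ(2, [2], deque([demo_a, demo_b]), 0)
    demo.multi_level_feedback_queue()
    assert demo.calculate_waiting_time([demo_a, demo_b]) == [2, 5]
    assert demo.calculate_turnaround_time([demo_a, demo_b]) == [7, 8]
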
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes (P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes (P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:{mlfq.calculate_sequence_of_finish_queue()}")
| 305
| 1
|
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3], cr)  # measure the last two qubits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
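
# Classical reference for comparison (added for illustration, not part of the
# original module): a full adder maps (a, b, c_in) to a sum bit and a carry
# bit, so inputs (1, 1, 1) should put essentially all shots on the bitstring
# "11" (carry=1, sum=1) in the counts returned above.
def classical_full_adder(a: int, b: int, c_in: int) -> tuple[int, int]:
    total = a + b + c_in
    return total % 2, total // 2  # (sum bit, carry-out bit)
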
| 305
|
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
| 305
| 1
|
import requests
from bs4 import BeautifulSoup

def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 305
|
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes):
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
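# Illustrative values (added, not from the original): the high bits wrap
# around, e.g. left_rotate_32(1, 31) == 2**31 and left_rotate_32(2**31, 1) == 1.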
def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))
        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)
    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
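# Known-answer sanity check (added for illustration; hashlib is only used to
# confirm the pure-Python digest and is not part of the algorithm itself).
if __name__ == "__main__":
    import hashlib
    assert md5_me(b"") == hashlib.md5(b"").hexdigest().encode("utf-8")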
if __name__ == "__main__":
import doctest
doctest.testmod()
| 305
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
A : Tuple = None
A : Any = logging.get_logger(__name__)
A : Optional[int] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
A : str = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
A : int = {
"google/bigbird-roberta-base": 40_96,
"google/bigbird-roberta-large": 40_96,
"google/bigbird-base-trivia-itc": 40_96,
}
A : Optional[Any] = "▁"
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = BigBirdTokenizer
lowerCamelCase__ = ['''input_ids''', '''attention_mask''']
lowerCamelCase__ = []
def __init__( self : Optional[int] , __magic_name__ : Optional[Any]=None , __magic_name__ : Optional[Any]=None , __magic_name__ : List[Any]="<unk>" , __magic_name__ : Tuple="<s>" , __magic_name__ : Union[str, Any]="</s>" , __magic_name__ : Dict="<pad>" , __magic_name__ : str="[SEP]" , __magic_name__ : Dict="[MASK]" , __magic_name__ : int="[CLS]" , **__magic_name__ : Optional[int] , ) -> str:
SCREAMING_SNAKE_CASE_ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else bos_token
SCREAMING_SNAKE_CASE_ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token
SCREAMING_SNAKE_CASE_ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token
SCREAMING_SNAKE_CASE_ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token
SCREAMING_SNAKE_CASE_ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else cls_token
SCREAMING_SNAKE_CASE_ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
super().__init__(
__magic_name__ , tokenizer_file=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , **__magic_name__ , )
SCREAMING_SNAKE_CASE_ = vocab_file
SCREAMING_SNAKE_CASE_ = False if not self.vocab_file else True
def __A ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __A ( self : str , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__magic_name__ )) + [1]
return [1] + ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1]
def __A ( self : Optional[Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
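    # For orientation (added note): a single sequence is laid out as
    # [CLS] A [SEP] with all-zero token type ids, while a pair becomes
    # [CLS] A [SEP] B [SEP] with ones over the second segment, mirroring BERT.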
def __A ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__magic_name__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ):
copyfile(self.vocab_file , __magic_name__ )
return (out_vocab_file,)
| 305
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : int ) -> Any:
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = BlipImageProcessor()
SCREAMING_SNAKE_CASE_ = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
SCREAMING_SNAKE_CASE_ = BlipaProcessor(__magic_name__ , __magic_name__ )
processor.save_pretrained(self.tmpdirname )
def __A ( self : str , **__magic_name__ : int ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer
def __A ( self : Dict , **__magic_name__ : List[Any] ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor
def __A ( self : int ) -> Any:
shutil.rmtree(self.tmpdirname )
def __A ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE_ = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
SCREAMING_SNAKE_CASE_ = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
SCREAMING_SNAKE_CASE_ = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def __A ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = image_processor(__magic_name__ , return_tensors="np" )
SCREAMING_SNAKE_CASE_ = processor(images=__magic_name__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def __A ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ = processor.batch_decode(__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def __A ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 305
| 1
|
def triangle_number_generator():
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2
def count_divisors(n):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
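# Illustrative check (added): 28 = 2**2 * 7 has (2 + 1) * (1 + 1) = 6
# divisors, namely 1, 2, 4, 7, 14 and 28, so count_divisors(28) == 6.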
def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
| 305
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
A : List[Any] = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = ["DPTFeatureExtractor"]
A : str = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
A : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 305
| 1
|
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class lowerCamelCase :
"""simple docstring"""
    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )
        self.config = config
        self.set_stage_and_offload()
    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True
    def find_config_node(self, ds_key_long):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key
        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)
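    # e.g. get_value("zero_optimization.stage", -1) walks
    # config["zero_optimization"]["stage"] and falls back to -1 when any
    # segment of the dotted path is missing.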
    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , __magic_name__ : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = engine
def __A ( self : Optional[Any] , __magic_name__ : Optional[int] , **__magic_name__ : Tuple ) -> Optional[Any]:
# runs backpropagation and handles mixed precision
self.engine.backward(__magic_name__ , **__magic_name__ )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self : Dict , __magic_name__ : List[str] ) -> Tuple:
super().__init__(__magic_name__ , device_placement=__magic_name__ , scaler=__magic_name__ )
SCREAMING_SNAKE_CASE_ = hasattr(self.optimizer , "overflow" )
def __A ( self : str , __magic_name__ : Tuple=None ) -> Tuple:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def __A ( self : Optional[Any] ) -> Optional[int]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def __A ( self : Optional[int] ) -> Optional[int]:
if self.__has_overflow__:
return self.optimizer.overflow
return False
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self : List[str] , __magic_name__ : Dict , __magic_name__ : str ) -> str:
super().__init__(__magic_name__ , __magic_name__ )
def __A ( self : Optional[Any] ) -> str:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : List[str]=0.001 , __magic_name__ : Tuple=0 , **__magic_name__ : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = params
SCREAMING_SNAKE_CASE_ = lr
SCREAMING_SNAKE_CASE_ = weight_decay
SCREAMING_SNAKE_CASE_ = kwargs
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Dict=None , __magic_name__ : Optional[int]=0 , **__magic_name__ : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = optimizer
SCREAMING_SNAKE_CASE_ = total_num_steps
SCREAMING_SNAKE_CASE_ = warmup_num_steps
SCREAMING_SNAKE_CASE_ = kwargs
| 305
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    return "".join(sorted(word))

def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]
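# For intuition (added): signature("pots") == signature("stop") == "opst",
# so both words land in the same bucket and report each other as anagrams.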
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
| 305
| 1
|
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
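# The run above prints 5: the longest chain is 0 -> 2 -> 5 -> 6 -> 7 (five
# vertices), and long_dist counts vertices along the path, not edges.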
| 305
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : int = logging.get_logger(__name__)
A : str = {
"kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''align_text_model'''
def __init__( self : Optional[Any] , __magic_name__ : Union[str, Any]=30_522 , __magic_name__ : Tuple=768 , __magic_name__ : List[str]=12 , __magic_name__ : Optional[Any]=12 , __magic_name__ : str=3_072 , __magic_name__ : Dict="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : List[str]=512 , __magic_name__ : Any=2 , __magic_name__ : Optional[Any]=0.02 , __magic_name__ : int=1e-12 , __magic_name__ : str=0 , __magic_name__ : Optional[Any]="absolute" , __magic_name__ : Optional[Any]=True , **__magic_name__ : Tuple , ) -> Union[str, Any]:
super().__init__(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = position_embedding_type
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = pad_token_id
@classmethod
def __A ( cls : Any , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : Optional[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
SCREAMING_SNAKE_CASE_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''align_vision_model'''
def __init__( self : List[str] , __magic_name__ : int = 3 , __magic_name__ : int = 600 , __magic_name__ : float = 2.0 , __magic_name__ : float = 3.1 , __magic_name__ : int = 8 , __magic_name__ : List[int] = [3, 3, 5, 3, 5, 5, 3] , __magic_name__ : List[int] = [32, 16, 24, 40, 80, 112, 192] , __magic_name__ : List[int] = [16, 24, 40, 80, 112, 192, 320] , __magic_name__ : List[int] = [] , __magic_name__ : List[int] = [1, 2, 2, 2, 1, 2, 1] , __magic_name__ : List[int] = [1, 2, 2, 3, 3, 4, 1] , __magic_name__ : List[int] = [1, 6, 6, 6, 6, 6, 6] , __magic_name__ : float = 0.25 , __magic_name__ : str = "swish" , __magic_name__ : int = 2_560 , __magic_name__ : str = "mean" , __magic_name__ : float = 0.02 , __magic_name__ : float = 0.001 , __magic_name__ : float = 0.99 , __magic_name__ : float = 0.2 , **__magic_name__ : List[Any] , ) -> Tuple:
super().__init__(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = width_coefficient
SCREAMING_SNAKE_CASE_ = depth_coefficient
SCREAMING_SNAKE_CASE_ = depth_divisor
SCREAMING_SNAKE_CASE_ = kernel_sizes
SCREAMING_SNAKE_CASE_ = in_channels
SCREAMING_SNAKE_CASE_ = out_channels
SCREAMING_SNAKE_CASE_ = depthwise_padding
SCREAMING_SNAKE_CASE_ = strides
SCREAMING_SNAKE_CASE_ = num_block_repeats
SCREAMING_SNAKE_CASE_ = expand_ratios
SCREAMING_SNAKE_CASE_ = squeeze_expansion_ratio
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dim
SCREAMING_SNAKE_CASE_ = pooling_type
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = batch_norm_eps
SCREAMING_SNAKE_CASE_ = batch_norm_momentum
SCREAMING_SNAKE_CASE_ = drop_connect_rate
SCREAMING_SNAKE_CASE_ = sum(__magic_name__ ) * 4
@classmethod
def __A ( cls : List[str] , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : Dict ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
SCREAMING_SNAKE_CASE_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''align'''
lowerCamelCase__ = True
def __init__( self : Optional[Any] , __magic_name__ : Dict=None , __magic_name__ : List[Any]=None , __magic_name__ : str=640 , __magic_name__ : Any=1.0 , __magic_name__ : Dict=0.02 , **__magic_name__ : Union[str, Any] , ) -> int:
super().__init__(**__magic_name__ )
if text_config is None:
SCREAMING_SNAKE_CASE_ = {}
logger.info("text_config is None. Initializing the AlignTextConfig with default values." )
if vision_config is None:
SCREAMING_SNAKE_CASE_ = {}
logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )
SCREAMING_SNAKE_CASE_ = AlignTextConfig(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = AlignVisionConfig(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = projection_dim
SCREAMING_SNAKE_CASE_ = temperature_init_value
SCREAMING_SNAKE_CASE_ = initializer_range
@classmethod
def __A ( cls : List[str] , __magic_name__ : AlignTextConfig , __magic_name__ : AlignVisionConfig , **__magic_name__ : Tuple ) -> Any:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__magic_name__ )
def __A ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.text_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.__class__.model_type
return output
| 305
| 1
|
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@property
def __A ( self : Optional[int] ) -> Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __A ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE_ = PNDMScheduler()
SCREAMING_SNAKE_CASE_ = PNDMPipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pndm.to(__magic_name__ )
pndm.set_progress_bar_config(disable=__magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pndm(generator=__magic_name__ , num_inference_steps=20 , output_type="numpy" ).images
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pndm(generator=__magic_name__ , num_inference_steps=20 , output_type="numpy" , return_dict=__magic_name__ )[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_ = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE_ = "google/ddpm-cifar10-32"
SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = PNDMScheduler()
SCREAMING_SNAKE_CASE_ = PNDMPipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pndm.to(__magic_name__ )
pndm.set_progress_bar_config(disable=__magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pndm(generator=__magic_name__ , output_type="numpy" ).images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 305
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = "x = 3"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3} )
SCREAMING_SNAKE_CASE_ = "x = y"
SCREAMING_SNAKE_CASE_ = {"y": 5}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 5, "y": 5} )
def __A ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE_ = "y = add_two(x)"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result is None
assert "tried to execute add_two" in out.out
def __A ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE_ = "x = 3"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3} )
def __A ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def __A ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = "x = 3\ny = 5"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
def __A ( self : Any ) -> List[str]:
SCREAMING_SNAKE_CASE_ = "text = f'This is x: {x}.'"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__magic_name__ , {"x": 3, "text": "This is x: 3."} )
def __A ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE_ = "if x <= 3:\n y = 2\nelse:\n y = 5"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 2} )
SCREAMING_SNAKE_CASE_ = {"x": 8}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 8, "y": 5} )
def __A ( self : str ) -> str:
SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
self.assertListEqual(__magic_name__ , [3, 5] )
self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} )
def __A ( self : Union[str, Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = "y = x"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 3} )
def __A ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]\ntest_list[1]"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} )
SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def __A ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = "x = 0\nfor i in range(3):\n x = i"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"range": range} , state=__magic_name__ )
assert result == 2
self.assertDictEqual(__magic_name__ , {"x": 2, "i": 2} )
| 305
| 1
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
A : List[Any] = logging.get_logger(__name__)
@dataclass
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Name of TPU'''} , )
lowerCamelCase__ = field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
lowerCamelCase__ = field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Benchmark models in eager model.'''} )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
} , )
@cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
@cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy
@property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 305
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list) -> float:
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])

def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]

def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q25 = np.percentile(train_user, 25)
    q75 = np.percentile(train_user, 75)
    iqr = q75 - q25
    low_lim = q25 - (iqr * 0.1)
    return low_lim

def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
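# Worked example (added): data_safety_checker([0.5, 0.45, 0.2], 0.5) counts
# two "safe" votes (deviations 0.0 and 0.05 are within 0.1) against one
# "not safe" vote (0.2 deviates by 0.3), so it returns True.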
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f"Today's data is {not_str}safe.")
| 305
| 1
|
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
if "model" in orig_key:
SCREAMING_SNAKE_CASE_ = orig_key.replace("model." , "" )
if "norm1" in orig_key:
SCREAMING_SNAKE_CASE_ = orig_key.replace("norm1" , "attention.output.LayerNorm" )
if "norm2" in orig_key:
SCREAMING_SNAKE_CASE_ = orig_key.replace("norm2" , "output.LayerNorm" )
if "norm" in orig_key:
SCREAMING_SNAKE_CASE_ = orig_key.replace("norm" , "LayerNorm" )
if "transformer" in orig_key:
SCREAMING_SNAKE_CASE_ = orig_key.split("." )[0].split("_" )[-1]
SCREAMING_SNAKE_CASE_ = orig_key.replace(F'''transformer_{layer_num}''' , F'''encoder.layer.{layer_num}''' )
if "mha.attn" in orig_key:
SCREAMING_SNAKE_CASE_ = orig_key.replace("mha.attn" , "attention.self" )
if "mha" in orig_key:
SCREAMING_SNAKE_CASE_ = orig_key.replace("mha" , "attention" )
if "W_q" in orig_key:
SCREAMING_SNAKE_CASE_ = orig_key.replace("W_q" , "self.query" )
if "W_k" in orig_key:
SCREAMING_SNAKE_CASE_ = orig_key.replace("W_k" , "self.key" )
if "W_v" in orig_key:
SCREAMING_SNAKE_CASE_ = orig_key.replace("W_v" , "self.value" )
if "ff1" in orig_key:
SCREAMING_SNAKE_CASE_ = orig_key.replace("ff1" , "intermediate.dense" )
if "ff2" in orig_key:
SCREAMING_SNAKE_CASE_ = orig_key.replace("ff2" , "output.dense" )
if "ff" in orig_key:
SCREAMING_SNAKE_CASE_ = orig_key.replace("ff" , "output.dense" )
if "mlm_class" in orig_key:
SCREAMING_SNAKE_CASE_ = orig_key.replace("mlm.mlm_class" , "cls.predictions.decoder" )
if "mlm" in orig_key:
SCREAMING_SNAKE_CASE_ = orig_key.replace("mlm" , "cls.predictions.transform" )
if "cls" not in orig_key:
SCREAMING_SNAKE_CASE_ = "yoso." + orig_key
return orig_key
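# e.g. "model.transformer_0.norm1.weight" becomes
# "yoso.encoder.layer.0.attention.output.LayerNorm.weight" after the chain
# of replacements above.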
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    print(model.load_state_dict(new_state_dict))
model.eval()
model.save_pretrained(__UpperCamelCase )
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
A : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A : int = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 305
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A : List[str] = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Any = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
A : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 305
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
A : Union[str, Any] = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def a__ ( ):
SCREAMING_SNAKE_CASE_ = _ask_options(
"In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
SCREAMING_SNAKE_CASE_ = get_sagemaker_input()
else:
SCREAMING_SNAKE_CASE_ = get_cluster_input()
return config
def a__ ( __UpperCamelCase=None ):
if subparsers is not None:
SCREAMING_SNAKE_CASE_ = subparsers.add_parser("config" , description=__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser("Accelerate config command" , description=__UpperCamelCase )
parser.add_argument(
"--config_file" , default=__UpperCamelCase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=__UpperCamelCase )
return parser
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = get_user_input()
if args.config_file is not None:
SCREAMING_SNAKE_CASE_ = args.config_file
else:
if not os.path.isdir(__UpperCamelCase ):
os.makedirs(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = default_yaml_config_file
if config_file.endswith(".json" ):
config.to_json_file(__UpperCamelCase )
else:
config.to_yaml_file(__UpperCamelCase )
print(F'''accelerate configuration saved at {config_file}''' )
def a__ ( ):
SCREAMING_SNAKE_CASE_ = config_command_parser()
SCREAMING_SNAKE_CASE_ = parser.parse_args()
config_command(__UpperCamelCase )
if __name__ == "__main__":
main()
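# Hedged usage note: this module backs the interactive `accelerate config` CLI
# command, so the prompt flow above can be started either with the default cache
# location or an explicit target file:
#
#   accelerate config
#   accelerate config --config_file ./my_config.yaml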
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
A : Union[str, Any] = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@classmethod
def __A ( cls : Any ) -> Dict:
SCREAMING_SNAKE_CASE_ = TOKEN
HfFolder.save_token(__magic_name__ )
@classmethod
def __A ( cls : Optional[int] ) -> Tuple:
try:
delete_repo(token=cls._token , repo_id="test-model-flax" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" )
except HTTPError:
pass
def __A ( self : str ) -> str:
SCREAMING_SNAKE_CASE_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ )
model.push_to_hub("test-model-flax" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="test-model-flax" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__magic_name__ , repo_id="test-model-flax" , push_to_hub=__magic_name__ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' )
def __A ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ )
model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
__magic_name__ , repo_id="valid_org/test-model-flax-org" , push_to_hub=__magic_name__ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F'''{key} not identical''' )
def a__ ( __UpperCamelCase , __UpperCamelCase ):
    SCREAMING_SNAKE_CASE_ = True
    SCREAMING_SNAKE_CASE_ = flatten_dict(modela.params )
    SCREAMING_SNAKE_CASE_ = flatten_dict(modelb.params )
    for key in flat_params_a.keys():
        # Flag the models as different if any flattened weight diverges beyond tolerance.
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1E-4:
            SCREAMING_SNAKE_CASE_ = False
    return models_are_equal
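# Small usage sketch for the helper above (the call-site name `check_models_equal`
# is taken from the tests below; the checkpoint is one the tests already use):
#
#   model_a = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
#   model_b = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
#   assert check_models_equal(model_a, model_b)  # identical weights compare equal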
@require_flax
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE_ = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ )
SCREAMING_SNAKE_CASE_ = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__magic_name__ , __magic_name__ ) )
with self.assertRaises(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertTrue(check_models_equal(__magic_name__ , __magic_name__ ) )
def __A ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(__magic_name__ )
SCREAMING_SNAKE_CASE_ = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__magic_name__ , __magic_name__ ) , max_shard_size="10KB" )
with self.assertRaises(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertTrue(check_models_equal(__magic_name__ , __magic_name__ ) )
def __A ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE_ = "bert"
SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-bert-subfolder"
with self.assertRaises(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def __A ( self : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE_ = "bert"
SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
with self.assertRaises(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertIsNotNone(__magic_name__ )
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Optional[int] = logging.get_logger(__name__)
A : str = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''owlvit_text_model'''
def __init__( self : Tuple , __magic_name__ : List[Any]=49_408 , __magic_name__ : str=512 , __magic_name__ : Dict=2_048 , __magic_name__ : Dict=12 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[Any]=16 , __magic_name__ : Optional[Any]="quick_gelu" , __magic_name__ : Optional[Any]=1e-5 , __magic_name__ : Union[str, Any]=0.0 , __magic_name__ : str=0.02 , __magic_name__ : List[str]=1.0 , __magic_name__ : Optional[int]=0 , __magic_name__ : Tuple=49_406 , __magic_name__ : Optional[Any]=49_407 , **__magic_name__ : List[str] , ) -> Tuple:
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = initializer_factor
@classmethod
def __A ( cls : List[str] , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : str ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
SCREAMING_SNAKE_CASE_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''owlvit_vision_model'''
def __init__( self : List[Any] , __magic_name__ : int=768 , __magic_name__ : Dict=3_072 , __magic_name__ : Any=12 , __magic_name__ : Optional[Any]=12 , __magic_name__ : List[Any]=3 , __magic_name__ : List[Any]=768 , __magic_name__ : Any=32 , __magic_name__ : Tuple="quick_gelu" , __magic_name__ : List[Any]=1e-5 , __magic_name__ : Tuple=0.0 , __magic_name__ : Any=0.02 , __magic_name__ : Union[str, Any]=1.0 , **__magic_name__ : Optional[int] , ) -> Tuple:
super().__init__(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = initializer_factor
@classmethod
def __A ( cls : Union[str, Any] , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : Tuple ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
SCREAMING_SNAKE_CASE_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''owlvit'''
lowerCamelCase__ = True
def __init__( self : List[Any] , __magic_name__ : Any=None , __magic_name__ : str=None , __magic_name__ : Optional[int]=512 , __magic_name__ : Any=2.6592 , __magic_name__ : Optional[int]=True , **__magic_name__ : Union[str, Any] , ) -> Any:
super().__init__(**__magic_name__ )
if text_config is None:
SCREAMING_SNAKE_CASE_ = {}
logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
if vision_config is None:
SCREAMING_SNAKE_CASE_ = {}
logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
SCREAMING_SNAKE_CASE_ = OwlViTTextConfig(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = OwlViTVisionConfig(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = projection_dim
SCREAMING_SNAKE_CASE_ = logit_scale_init_value
SCREAMING_SNAKE_CASE_ = return_dict
SCREAMING_SNAKE_CASE_ = 1.0
@classmethod
def __A ( cls : Optional[int] , __magic_name__ : Union[str, os.PathLike] , **__magic_name__ : Union[str, Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__magic_name__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
@classmethod
def __A ( cls : str , __magic_name__ : Dict , __magic_name__ : Dict , **__magic_name__ : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = text_config
SCREAMING_SNAKE_CASE_ = vision_config
return cls.from_dict(__magic_name__ , **__magic_name__ )
def __A ( self : str ) -> Any:
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.text_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.__class__.model_type
return output
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def __A ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
def __A ( self : int ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
def __A ( self : Dict ) -> float:
return 1e-4
def __A ( self : Any , __magic_name__ : "ProcessorMixin" , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE_ = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__magic_name__ , seq_length=__magic_name__ , framework=__magic_name__ )
SCREAMING_SNAKE_CASE_ = super().generate_dummy_inputs(
processor.image_processor , batch_size=__magic_name__ , framework=__magic_name__ )
return {**text_input_dict, **image_input_dict}
@property
def __A ( self : Union[str, Any] ) -> int:
return 14
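# Construction sketch: the combined class is assumed to be exported as
# OwlViTConfig (the sub-config names come from the call sites above); values are
# illustrative. Plain dicts are accepted and wrapped into the text/vision
# sub-configs in __init__.
#
#   config = OwlViTConfig(
#       text_config={"vocab_size": 49_408, "hidden_size": 512},
#       vision_config={"hidden_size": 768, "patch_size": 32},
#       projection_dim=512,
#   )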
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase , __UpperCamelCase )
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = emb.weight.shape
SCREAMING_SNAKE_CASE_ = nn.Linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = emb.weight.data
return lin_layer
def a__ ( __UpperCamelCase , __UpperCamelCase="facebook/mbart-large-en-ro" , __UpperCamelCase=False , __UpperCamelCase=False ):
SCREAMING_SNAKE_CASE_ = torch.load(__UpperCamelCase , map_location="cpu" )["model"]
remove_ignore_keys_(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = state_dict["encoder.embed_tokens.weight"].shape[0]
SCREAMING_SNAKE_CASE_ = MBartConfig.from_pretrained(__UpperCamelCase , vocab_size=__UpperCamelCase )
if mbart_aa and finetuned:
SCREAMING_SNAKE_CASE_ = "relu"
SCREAMING_SNAKE_CASE_ = state_dict["decoder.embed_tokens.weight"]
SCREAMING_SNAKE_CASE_ = MBartForConditionalGeneration(__UpperCamelCase )
model.model.load_state_dict(__UpperCamelCase )
if finetuned:
SCREAMING_SNAKE_CASE_ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
A : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
A : Dict = parser.parse_args()
A : Optional[Any] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
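# Hedged invocation sketch (script name and paths are hypothetical; the two
# positional arguments match the parser above):
#
#   python convert_mbart_checkpoint.py ./model.pt ./mbart-hf \
#       --hf_config facebook/mbart-large-cc25 --finetuned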
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
lowerCamelCase__ = Features({'''text''': Value('''string''' )} )
lowerCamelCase__ = Features({'''summary''': Value('''string''' )} )
lowerCamelCase__ = "text"
lowerCamelCase__ = "summary"
@property
def __A ( self : Dict ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
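# Behaviour sketch (the exported class name Summarization and the property name
# column_mapping are assumptions; the field names are illustrative):
#
#   Summarization(text_column="article", summary_column="highlights").column_mapping
#   # -> {"article": "text", "highlights": "summary"}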
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A : Optional[Any] = logging.get_logger(__name__)
A : int = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''deta'''
lowerCamelCase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Optional[Any] , __magic_name__ : List[Any]=None , __magic_name__ : str=900 , __magic_name__ : Union[str, Any]=2_048 , __magic_name__ : List[str]=6 , __magic_name__ : Union[str, Any]=2_048 , __magic_name__ : int=8 , __magic_name__ : List[str]=6 , __magic_name__ : Tuple=1_024 , __magic_name__ : Tuple=8 , __magic_name__ : List[Any]=0.0 , __magic_name__ : Optional[int]=True , __magic_name__ : str="relu" , __magic_name__ : int=256 , __magic_name__ : Tuple=0.1 , __magic_name__ : Optional[int]=0.0 , __magic_name__ : Optional[Any]=0.0 , __magic_name__ : List[str]=0.02 , __magic_name__ : Tuple=1.0 , __magic_name__ : List[str]=True , __magic_name__ : Optional[Any]=False , __magic_name__ : Dict="sine" , __magic_name__ : Union[str, Any]=5 , __magic_name__ : Tuple=4 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : List[str]=True , __magic_name__ : List[str]=300 , __magic_name__ : Optional[Any]=True , __magic_name__ : Optional[Any]=True , __magic_name__ : str=1 , __magic_name__ : Union[str, Any]=5 , __magic_name__ : List[str]=2 , __magic_name__ : Tuple=1 , __magic_name__ : List[Any]=1 , __magic_name__ : str=5 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Any=0.25 , **__magic_name__ : Tuple , ) -> Optional[int]:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(__magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ = backbone_config.pop("model_type" )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(__magic_name__ )
SCREAMING_SNAKE_CASE_ = backbone_config
SCREAMING_SNAKE_CASE_ = num_queries
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = encoder_attention_heads
SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = decoder_attention_heads
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = activation_function
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = init_xavier_std
SCREAMING_SNAKE_CASE_ = encoder_layerdrop
SCREAMING_SNAKE_CASE_ = auxiliary_loss
SCREAMING_SNAKE_CASE_ = position_embedding_type
# deformable attributes
SCREAMING_SNAKE_CASE_ = num_feature_levels
SCREAMING_SNAKE_CASE_ = encoder_n_points
SCREAMING_SNAKE_CASE_ = decoder_n_points
SCREAMING_SNAKE_CASE_ = two_stage
SCREAMING_SNAKE_CASE_ = two_stage_num_proposals
SCREAMING_SNAKE_CASE_ = with_box_refine
SCREAMING_SNAKE_CASE_ = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
SCREAMING_SNAKE_CASE_ = class_cost
SCREAMING_SNAKE_CASE_ = bbox_cost
SCREAMING_SNAKE_CASE_ = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE_ = mask_loss_coefficient
SCREAMING_SNAKE_CASE_ = dice_loss_coefficient
SCREAMING_SNAKE_CASE_ = bbox_loss_coefficient
SCREAMING_SNAKE_CASE_ = giou_loss_coefficient
SCREAMING_SNAKE_CASE_ = eos_coefficient
SCREAMING_SNAKE_CASE_ = focal_alpha
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def __A ( self : str ) -> int:
return self.encoder_attention_heads
@property
def __A ( self : Any ) -> int:
return self.d_model
def __A ( self : Union[str, Any] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.__class__.model_type
return output
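# Construction sketch (the exported class name DetaConfig is an assumption;
# values are illustrative): with backbone_config left as None the default ResNet
# backbone from CONFIG_MAPPING is used, and two_stage=True requires
# with_box_refine=True, as enforced in __init__.
#
#   config = DetaConfig(num_queries=300, two_stage=True, with_box_refine=True)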
from ....utils import logging
A : List[str] = logging.get_logger(__name__)
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Any=None , __magic_name__ : List[str]=2_048 ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = config.__dict__
SCREAMING_SNAKE_CASE_ = modal_hidden_size
if num_labels:
SCREAMING_SNAKE_CASE_ = num_labels
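# Wrapping sketch (the class name MMBTConfig is an assumption inferred from the
# API shape): the wrapper copies an existing text config's attributes and adds a
# multimodal hidden size on top.
#
#   base = BertConfig()
#   mm_config = MMBTConfig(base, num_labels=2, modal_hidden_size=2_048)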
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : Optional[int] = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
A : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = ['''image_processor''', '''tokenizer''']
lowerCamelCase__ = '''ViltImageProcessor'''
lowerCamelCase__ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : Optional[int] , __magic_name__ : str=None , __magic_name__ : List[str]=None , **__magic_name__ : Any ) -> str:
SCREAMING_SNAKE_CASE_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __magic_name__ , )
SCREAMING_SNAKE_CASE_ = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = self.image_processor
def __call__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __magic_name__ : bool = True , __magic_name__ : Union[bool, str, PaddingStrategy] = False , __magic_name__ : Union[bool, str, TruncationStrategy] = None , __magic_name__ : Optional[int] = None , __magic_name__ : int = 0 , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = True , __magic_name__ : Optional[Union[str, TensorType]] = None , **__magic_name__ : str , ) -> BatchEncoding:
SCREAMING_SNAKE_CASE_ = self.tokenizer(
text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_token_type_ids=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
# add pixel_values + pixel_mask
SCREAMING_SNAKE_CASE_ = self.image_processor(__magic_name__ , return_tensors=__magic_name__ )
encoding.update(__magic_name__ )
return encoding
def __A ( self : Optional[int] , *__magic_name__ : List[Any] , **__magic_name__ : Optional[Any] ) -> Any:
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def __A ( self : Dict , *__magic_name__ : List[Any] , **__magic_name__ : Union[str, Any] ) -> str:
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def __A ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __A ( self : Dict ) -> List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __magic_name__ , )
return self.image_processor_class
@property
def __A ( self : int ) -> List[Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __magic_name__ , )
return self.image_processor
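# Usage sketch (the exported class name ViltProcessor and the checkpoint are
# assumptions; `image` stands for any PIL image): one call tokenizes the text
# and merges in pixel_values / pixel_mask from the image processor.
#
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-mlm")
#   inputs = processor(image, "a photo of a cat", return_tensors="pt")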