"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4,
        decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True,
        is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False,
        max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256,
        eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1, seq_length: int = -1, is_pair: bool = False,
        framework: Optional["TensorType"] = None, sampling_rate: int = 22050,
        time_duration: float = 5.0, frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size,
            framework=framework, sampling_rate=sampling_rate, time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
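# Usage sketch (not part of the original file; values follow the defaults above):
#
#   from transformers import WhisperConfig
#
#   config = WhisperConfig()
#   config.vocab_size    # 51865
#   config.hidden_size   # 256 -- aliased to `d_model` through `attribute_map`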
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) matches the keyword name of the imported helper
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the textual content of the artifacts of the last completed daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
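# Usage sketch (the env var and artifact name below are hypothetical, not from this file):
#
#   import os
#
#   reports = get_last_daily_ci_reports(
#       artifact_names=["run_all_tests_gpu_test_reports"],
#       output_dir="ci_artifacts",
#       token=os.environ["GITHUB_TOKEN"],
#   )
#   # -> {artifact_name: {filename_inside_zip: decoded_text}}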
"""simple docstring"""
def method_1(boundary, steps):
    """Approximate the integral of f over [boundary[0], boundary[1]] with `steps` trapezoids."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a + h, a + 2h, ..., b - h."""
    x = a + h
    # a tolerance of h/2 so that float error does not drop the last interior point
    while x < (b - h / 2.0):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
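# Sanity check (derived from the code above, using the interior-point loop with tolerance):
# for f(x) = x**2 on [0, 1] with 10 steps, the rule sums
#   h/2 * (f(0) + f(1)) + h * (f(0.1) + ... + f(0.9)) = 0.05 + 0.1 * 2.85 = 0.335,
# against the exact integral 1/3 ~= 0.3333, so `main()` prints y = 0.335.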
"""simple docstring"""
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning weight vector for a sample by squared Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # the winner is the weight vector with the smaller squared distance
        return 0 if d0 < d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move every component of the winning vector `j` towards the sample by rate `alpha`."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # training samples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
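# Worked example (not part of the original file): with weights [[1, 0], [0, 1]] and
# sample [1, 0], the squared distances are d0 = 0 and d1 = 2, so `get_winner`
# returns the closer cluster, 0.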
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    """Resolve an image processor class object from its class name."""
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Load the image processor configuration from a pretrained model identifier or local folder."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image processor class for a given config class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
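# Usage sketch (the checkpoint name below is an example, not from this file):
#
#   from transformers import AutoImageProcessor
#
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   # resolves to `ViTImageProcessor` through the "vit" entry of
#   # IMAGE_PROCESSOR_MAPPING_NAMES above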
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase__ = "src/transformers"
lowerCamelCase__ = "docs/source/en"
lowerCamelCase__ = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
lowerCamelCase__ = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase__ = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
lowerCamelCase__ = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    """Center `text` in a table cell of size `width`, padding with spaces."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_mapping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_mapping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in `index.md` is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
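# Usage sketch (run from the root of the repo, per the comment at the top of the file):
#
#   python utils/check_table.py                      # raises if the table is stale
#   python utils/check_table.py --fix_and_overwrite  # rewrites docs/source/en/index.md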
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self, fpn_feature_size: int = 256, mask_feature_size: int = 256, no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False, backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None, init_std: float = 0.02, init_xavier_std: float = 1.0,
        dice_weight: float = 1.0, cross_entropy_weight: float = 1.0, mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None, **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Instantiate a MaskFormerConfig from a backbone configuration and a decoder configuration."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
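# Usage sketch (not part of the original file): composing a config from explicit
# sub-configs with the classmethod defined above.
#
#   from transformers import DetrConfig, SwinConfig
#
#   config = MaskFormerConfig.from_backbone_and_decoder_configs(
#       backbone_config=SwinConfig(), decoder_config=DetrConfig()
#   )
#   config.to_dict()["model_type"]  # "maskformer"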
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score `item` by counting the positions where it matches `main_target`."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails to create two children."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of `child` with probability MUTATION_PROBABILITY."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Pick a second parent and breed with `parent_1`, mutating every child."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until a perfect match of `target` is found."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
lowerCamelCase__ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, tubelet_size=2,
        num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02,
        mask_ratio=0.9, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass
def prepare_video():
    """Load a short test video (as a list of frames) from the Hub."""
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
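# Usage sketch (assumed invocation, not from this file): from the root of the
# transformers repo, the suite above runs with
#
#   python -m pytest tests/models/videomae/test_modeling_videomae.py
#
# The `@slow` integration tests are skipped unless RUN_SLOW=1 is set.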
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    """Map a fairseq MusicGen parameter name to its HF module name."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename the fairseq state dict to HF names and split off the enc-dec projection weights."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu") -> None:
    """simple docstring"""
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )
    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''')
    if len(unexpected_keys) > 0:
        raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''')
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2_048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2_048
    model.generation_config.pad_token_id = 2_048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''')
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(F'''Pushing model {checkpoint} to {repo_id}''')
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
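# A minimal usage sketch; the script filename below is illustrative, not part of the repo:
#   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu
# Note the forward-pass check above assumes batch_size=2 and num_codebooks=4, which is
# why the (2, 4) token grid is flattened into (2 * 4, 1) decoder sequences.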
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file) -> str:
    """simple docstring"""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            F'''{test_file} instead.''')
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''')
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''')
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """simple docstring"""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """simple docstring"""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """simple docstring"""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """simple docstring"""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """simple docstring"""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """simple docstring"""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """simple docstring"""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """simple docstring"""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """simple docstring"""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """simple docstring"""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """simple docstring"""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
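# Example of how these helpers compose (the test file path is hypothetical):
#   test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
#   print(to_json(get_model_to_tester_mapping(test_file)))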
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"""Done. Image saved to disk as {file_name}.""")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["OwlViTFeatureExtractor"]
lowerCamelCase__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
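    # _LazyModule defers the heavy torch/vision imports until an exported attribute is
    # first accessed; only the TYPE_CHECKING branch above imports eagerly, and solely
    # for the benefit of static type checkers.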
"""simple docstring"""
from math import isqrt
def is_prime(number: int) -> bool:
    """simple docstring"""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))
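# The candidates tested below (7, 19, 37, 61, ...) are the differences of
# consecutive cubes: (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1. Consecutive candidates
# differ by 6*n, which is exactly the `prime_candidate += 6 * cube_index` update.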
def solution(max_prime: int = 10**6) -> int:
    """simple docstring"""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
def selection_sort(collection) -> list:
    """simple docstring"""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
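# Selection sort performs O(n^2) comparisons regardless of input order, but only
# O(n) swaps; it sorts in place and returns the same list for convenience, e.g.
# selection_sort([3, 1, 2]) -> [1, 2, 3].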
if __name__ == "__main__":
lowerCamelCase__ = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
"""simple docstring"""
lowerCamelCase__ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name) -> YolosConfig:
    """simple docstring"""
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1_333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1_320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1_344]
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v(state_dict, config, base_model=False) -> None:
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''vit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[F'''vit.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''vit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''vit.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''vit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[-config.hidden_size :, :]
        state_dict[F'''vit.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
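# timm stores the q/k/v projections fused as one (3 * hidden_size, hidden_size)
# matrix stacked in query, key, value order along dim 0, which is why the slices
# above are taken at [:h], [h:2h] and [-h:].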
def rename_key(name) -> str:
    """simple docstring"""
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
def convert_state_dict(orig_state_dict, model) -> dict:
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[F'''vit.encoder.layer.{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                orig_state_dict[F'''vit.encoder.layer.{layer_num}.attention.attention.key.weight'''] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[F'''vit.encoder.layer.{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[F'''vit.encoder.layer.{layer_num}.attention.attention.query.bias'''] = val[:dim]
                orig_state_dict[F'''vit.encoder.layer.{layer_num}.attention.attention.key.bias'''] = val[dim : dim * 2]
                orig_state_dict[F'''vit.encoder.layer.{layer_num}.attention.attention.value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
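# 000000039769.jpg (the two cats on a couch) is the stock COCO val2017 image used
# throughout the conversion scripts as a quick forward-pass sanity check.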
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False) -> None:
    """simple docstring"""
    config = get_yolos_config(yolos_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]])
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]])
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]])
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]])
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]])
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]])
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]])
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]])
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]])
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]])
    else:
        raise ValueError(F'''Unknown yolos_name: {yolos_name}''')
    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
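    # Example invocation (paths are illustrative):
    #   python convert_yolos.py --yolos_name yolos_s_200_pre \
    #       --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small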
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
_UpperCamelCase : Tuple = tempfile.mkdtemp()
_UpperCamelCase : str = 5
# Realm tok
_UpperCamelCase : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
_UpperCamelCase : Optional[Any] = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase : Optional[Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
_UpperCamelCase : Any = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
_UpperCamelCase : int = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
            ] , dtype=object , )
return block_records
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
_UpperCamelCase : List[str] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
_UpperCamelCase : Tuple = self.get_config()
_UpperCamelCase : int = self.get_dummy_retriever()
_UpperCamelCase : Tuple = retriever.tokenizer
_UpperCamelCase : List[str] = np.array([0, 3] , dtype="long" )
_UpperCamelCase : Union[str, Any] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : List[str] = tokenizer(
["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : str = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase : Any = self.get_config()
_UpperCamelCase : Dict = self.get_dummy_retriever()
_UpperCamelCase : Dict = retriever.tokenizer
_UpperCamelCase : List[Any] = np.array([0, 3, 5] , dtype="long" )
_UpperCamelCase : Optional[int] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : str = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : Union[str, Any] = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual([False, True, True] , __a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
"""simple docstring"""
def matching_min_vertex_cover(graph: dict) -> set:
    """simple docstring"""
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and then
    # remove all edges adjacent to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices
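# Every popped edge must be covered by any vertex cover, and distinct popped edges
# share no endpoints (their incident edges are discarded), so taking both endpoints
# of this maximal matching yields the classic 2-approximation of minimum vertex cover.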
def get_edges(graph: dict) -> set:
    """simple docstring"""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = LEDConfig
SCREAMING_SNAKE_CASE__ :str = {}
SCREAMING_SNAKE_CASE__ :List[str] = "gelu"
def __init__( self : List[Any] , __a : Union[str, Any] , __a : List[Any]=13 , __a : int=7 , __a : str=True , __a : Any=False , __a : str=99 , __a : str=32 , __a : Union[str, Any]=2 , __a : Optional[Any]=4 , __a : List[Any]=37 , __a : List[Any]=0.1 , __a : Tuple=0.1 , __a : Dict=20 , __a : str=2 , __a : Dict=1 , __a : Any=0 , __a : List[Any]=4 , ) -> List[Any]:
_UpperCamelCase : Optional[Any] = parent
_UpperCamelCase : List[str] = batch_size
_UpperCamelCase : str = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : int = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : str = max_position_embeddings
_UpperCamelCase : int = eos_token_id
_UpperCamelCase : Dict = pad_token_id
_UpperCamelCase : Optional[Any] = bos_token_id
_UpperCamelCase : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_UpperCamelCase : List[str] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_UpperCamelCase : int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
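        # e.g. with the defaults above (seq_length=7, attention_window=4), the encoder
        # length is padded up to the next multiple of the window: 7 + (4 - 7 % 4) % 4 = 8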
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCamelCase : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCamelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_UpperCamelCase : Dict = prepare_led_inputs_dict(__a , __a , __a )
_UpperCamelCase : Union[str, Any] = tf.concat(
[tf.zeros_like(__a )[:, :-1], tf.ones_like(__a )[:, -1:]] , axis=-1 , )
_UpperCamelCase : Union[str, Any] = global_attention_mask
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : int ) -> Tuple:
_UpperCamelCase : Tuple = TFLEDModel(config=__a ).get_decoder()
_UpperCamelCase : Tuple = inputs_dict["input_ids"]
_UpperCamelCase : int = input_ids[:1, :]
_UpperCamelCase : List[str] = inputs_dict["attention_mask"][:1, :]
_UpperCamelCase : List[Any] = 1
# first forward pass
_UpperCamelCase : Any = model(__a , attention_mask=__a , use_cache=__a )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCamelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _UpperCamelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
_UpperCamelCase : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCamelCase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCamelCase : Tuple = model(__a , attention_mask=__a )[0]
_UpperCamelCase : int = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCamelCase : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
) -> Dict:
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Tuple = True
SCREAMING_SNAKE_CASE__ :str = False
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
_UpperCamelCase : int = TFLEDModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase, _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[int] = tf.zeros_like(inputs_dict["attention_mask"] )
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : str = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_UpperCamelCase : Dict = True
_UpperCamelCase : str = self.model_tester.seq_length
_UpperCamelCase : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__a : Optional[int] ):
_UpperCamelCase : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__a : Optional[Any] ):
_UpperCamelCase : Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions]
_UpperCamelCase : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = True
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Any = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
_UpperCamelCase : Optional[Any] = model_class(__a )
_UpperCamelCase : List[Any] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCamelCase : int = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : str = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
_UpperCamelCase : Any = True
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    """simple docstring"""
    return tf.constant(tok_lst, dtype=tf.int32)
lowerCamelCase__ = 1E-4
@slow
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Any = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_UpperCamelCase : int = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Tuple = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Optional[int] = model(**__a )[0]
_UpperCamelCase : Optional[int] = (1, 1024, 768)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Tuple = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_UpperCamelCase : Optional[int] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : List[str] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Union[str, Any] = model(**__a )[0]
_UpperCamelCase : int = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Optional[int] = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 , rtol=1e-3 )
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = VQModel
SCREAMING_SNAKE_CASE__ :List[Any] = "sample"
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple=(32, 32) ) -> int:
_UpperCamelCase : List[Any] = 4
_UpperCamelCase : Dict = 3
_UpperCamelCase : Any = floats_tensor((batch_size, num_channels) + sizes ).to(__a )
return {"sample": image}
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
return (3, 32, 32)
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
return (3, 32, 32)
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
_UpperCamelCase : Tuple = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_UpperCamelCase : List[Any] = self.dummy_input
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
pass
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
_UpperCamelCase, _UpperCamelCase : Any = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__a )
self.assertIsNotNone(__a )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(__a )
_UpperCamelCase : List[str] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
_UpperCamelCase : Dict = VQModel.from_pretrained("fusing/vqgan-dummy" )
model.to(__a ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
_UpperCamelCase : Dict = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
_UpperCamelCase : str = image.to(__a )
with torch.no_grad():
_UpperCamelCase : List[str] = model(__a ).sample
_UpperCamelCase : str = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCamelCase : str = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43] )
# fmt: on
self.assertTrue(torch.allclose(__a , __a , atol=1e-3 ) )
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = RoCBertTokenizer
SCREAMING_SNAKE_CASE__ :Dict = None
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = True
SCREAMING_SNAKE_CASE__ :Union[str, Any] = filter_non_english
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
super().setUp()
_UpperCamelCase : Any = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
_UpperCamelCase : List[str] = {}
_UpperCamelCase : Tuple = {}
for i, value in enumerate(__a ):
_UpperCamelCase : List[str] = i
_UpperCamelCase : Optional[Any] = i
_UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
_UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(__a , __a , ensure_ascii=__a )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(__a , __a , ensure_ascii=__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : int = tokenizer.tokenize("你好[SEP]你是谁" )
self.assertListEqual(__a , ["你", "好", "[SEP]", "你", "是", "谁"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__a ) , [5, 6, 2, 5, 7, 8] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
_UpperCamelCase : Dict = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
_UpperCamelCase : List[Any] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
_UpperCamelCase : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Dict = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : int = RoCBertBasicTokenizer(do_lower_case=__a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_UpperCamelCase : Any = {}
for i, token in enumerate(__a ):
_UpperCamelCase : str = i
_UpperCamelCase : Optional[int] = RoCBertWordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
_UpperCamelCase : Tuple = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Union[str, Any] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_UpperCamelCase : Optional[Any] = tokenizer_r.encode_plus(
__a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , )
_UpperCamelCase : List[Any] = tokenizer_r.do_lower_case if hasattr(__a , "do_lower_case" ) else False
_UpperCamelCase : Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = ["的", "人", "有"]
_UpperCamelCase : int = "".join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : int = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Any = False
_UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Any = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCamelCase : Any = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : Optional[int] = tokenizer.encode("你好" , add_special_tokens=__a )
_UpperCamelCase : Dict = tokenizer.encode("你是谁" , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__a )
_UpperCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Optional[Any] = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : int = "你好,你是谁"
_UpperCamelCase : Any = tokenizer.tokenize(__a )
_UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__a )
_UpperCamelCase : List[str] = tokenizer.convert_tokens_to_shape_ids(__a )
_UpperCamelCase : Any = tokenizer.convert_tokens_to_pronunciation_ids(__a )
_UpperCamelCase : Optional[int] = tokenizer.prepare_for_model(
__a , __a , __a , add_special_tokens=__a )
_UpperCamelCase : Tuple = tokenizer.encode_plus(__a , add_special_tokens=__a )
self.assertEqual(__a , __a )
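# --- Added illustrative sketch (not part of the original test file) ---
# Minimal greedy longest-match WordPiece, the algorithm exercised by the
# RoCBertWordpieceTokenizer test above; the helper name and the set-based
# vocab are made up for illustration.
def _toy_wordpiece(token: str, vocab: set, unk: str = "[UNK]") -> list:
    pieces, start = [], 0
    while start < len(token):
        end = len(token)
        match = None
        while start < end:
            piece = token[start:end]
            if start > 0:
                piece = "##" + piece  # continuation pieces carry the "##" prefix
            if piece in vocab:
                match = piece
                break
            end -= 1
        if match is None:
            return [unk]  # any unmatchable span makes the whole token unknown
        pieces.append(match)
        start = end
    return pieces

# _toy_wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]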
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: weight-0 edges are relaxed via the front of the deque and
        weight-1 edges via the back, keeping the deque sorted by distance."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
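# --- Added illustrative usage (not part of the original file; assumes the
# de-obfuscated names above) ---
def _example_zero_one_bfs() -> None:
    # Three vertices; the path 0 -> 1 -> 2 costs 0 + 1 = 1, the same as the
    # direct weight-1 edge, so the shortest distance is 1.
    graph = AdjacencyList(3)
    graph.add_edge(0, 1, 0)
    graph.add_edge(1, 2, 1)
    graph.add_edge(0, 2, 1)
    assert graph.get_shortest_path(0, 2) == 1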
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = "yolos"
def __init__( self : Dict , __a : Optional[Any]=768 , __a : List[Any]=12 , __a : Any=12 , __a : List[Any]=3072 , __a : Optional[int]="gelu" , __a : Dict=0.0 , __a : Optional[Any]=0.0 , __a : Any=0.02 , __a : Optional[int]=1e-1_2 , __a : List[Any]=[512, 864] , __a : List[str]=16 , __a : str=3 , __a : Optional[Any]=True , __a : Optional[Any]=100 , __a : List[str]=True , __a : Any=False , __a : List[str]=1 , __a : str=5 , __a : Optional[Any]=2 , __a : Tuple=5 , __a : Any=2 , __a : Union[str, Any]=0.1 , **__a : List[str] , ) -> List[str]:
super().__init__(**__a )
_UpperCamelCase : Dict = hidden_size
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : Tuple = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : Any = qkv_bias
_UpperCamelCase : str = num_detection_tokens
_UpperCamelCase : str = use_mid_position_embeddings
_UpperCamelCase : List[str] = auxiliary_loss
# Hungarian matcher
_UpperCamelCase : List[Any] = class_cost
_UpperCamelCase : int = bbox_cost
_UpperCamelCase : Optional[int] = giou_cost
# Loss coefficients
_UpperCamelCase : List[Any] = bbox_loss_coefficient
_UpperCamelCase : str = giou_loss_coefficient
_UpperCamelCase : Dict = eos_coefficient
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> float:
return 1e-4
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return 12
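# --- Added illustrative sketch (not part of the original config file) ---
# Sanity check of the defaults above: a ViT-style backbone with
# image_size [512, 864] and patch_size 16 sees a 32 x 54 patch grid.
def _num_patches(image_size: list, patch_size: int) -> int:
    height, width = image_size
    return (height // patch_size) * (width // patch_size)

# _num_patches([512, 864], 16) == 1728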
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
_UpperCamelCase : Union[str, Any] = tempfile.mkdtemp()
_UpperCamelCase : int = SamImageProcessor()
_UpperCamelCase : Dict = SamProcessor(__a )
processor.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , **__a : List[str] ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).image_processor
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
_UpperCamelCase : List[Any] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase : Dict = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
_UpperCamelCase : List[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
_UpperCamelCase : List[str] = self.get_image_processor()
_UpperCamelCase : Tuple = SamProcessor(image_processor=__a )
_UpperCamelCase : Any = self.prepare_image_inputs()
_UpperCamelCase : List[Any] = image_processor(__a , return_tensors="np" )
_UpperCamelCase : Any = processor(images=__a , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
_UpperCamelCase : Optional[int] = self.get_image_processor()
_UpperCamelCase : Optional[int] = SamProcessor(image_processor=__a )
_UpperCamelCase : Tuple = [torch.ones((1, 3, 5, 5) )]
_UpperCamelCase : Optional[Any] = [[1764, 2646]]
_UpperCamelCase : Tuple = [[683, 1024]]
_UpperCamelCase : Optional[int] = processor.post_process_masks(__a , __a , __a )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_UpperCamelCase : Optional[Any] = processor.post_process_masks(
__a , torch.tensor(__a ) , torch.tensor(__a ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
_UpperCamelCase : str = [np.ones((1, 3, 5, 5) )]
_UpperCamelCase : Dict = processor.post_process_masks(__a , np.array(__a ) , np.array(__a ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_UpperCamelCase : List[Any] = [[1, 0], [0, 1]]
with self.assertRaises(__a ):
_UpperCamelCase : Optional[Any] = processor.post_process_masks(__a , np.array(__a ) , np.array(__a ) )
@require_vision
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
_UpperCamelCase : Optional[Any] = tempfile.mkdtemp()
_UpperCamelCase : Any = SamImageProcessor()
_UpperCamelCase : Optional[int] = SamProcessor(__a )
processor.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , **__a : str ) -> List[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).image_processor
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
_UpperCamelCase : Dict = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase : Optional[int] = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
_UpperCamelCase : Optional[int] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
_UpperCamelCase : Union[str, Any] = self.get_image_processor()
_UpperCamelCase : Optional[int] = SamProcessor(image_processor=__a )
_UpperCamelCase : str = self.prepare_image_inputs()
_UpperCamelCase : str = image_processor(__a , return_tensors="np" )
_UpperCamelCase : Dict = processor(images=__a , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
_UpperCamelCase : Dict = self.get_image_processor()
_UpperCamelCase : Optional[Any] = SamProcessor(image_processor=__a )
_UpperCamelCase : Tuple = [tf.ones((1, 3, 5, 5) )]
_UpperCamelCase : Any = [[1764, 2646]]
_UpperCamelCase : List[str] = [[683, 1024]]
_UpperCamelCase : List[Any] = processor.post_process_masks(__a , __a , __a , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_UpperCamelCase : Tuple = processor.post_process_masks(
__a , tf.convert_to_tensor(__a ) , tf.convert_to_tensor(__a ) , return_tensors="tf" , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
_UpperCamelCase : Tuple = [np.ones((1, 3, 5, 5) )]
_UpperCamelCase : Optional[Any] = processor.post_process_masks(
__a , np.array(__a ) , np.array(__a ) , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_UpperCamelCase : Any = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
_UpperCamelCase : str = processor.post_process_masks(
__a , np.array(__a ) , np.array(__a ) , return_tensors="tf" )
@require_vision
@require_torchvision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
_UpperCamelCase : List[str] = tempfile.mkdtemp()
_UpperCamelCase : Optional[Any] = SamImageProcessor()
_UpperCamelCase : List[Any] = SamProcessor(__a )
processor.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Dict , **__a : Optional[Any] ) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).image_processor
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
_UpperCamelCase : Union[str, Any] = self.get_image_processor()
_UpperCamelCase : int = SamProcessor(image_processor=__a )
        _UpperCamelCase : Union[str, Any] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.float32 )
_UpperCamelCase : Union[str, Any] = [tf.convert_to_tensor(__a )]
_UpperCamelCase : Optional[Any] = [torch.tensor(__a )]
_UpperCamelCase : Tuple = [[1764, 2646]]
_UpperCamelCase : List[Any] = [[683, 1024]]
_UpperCamelCase : Tuple = processor.post_process_masks(
__a , __a , __a , return_tensors="tf" )
_UpperCamelCase : List[Any] = processor.post_process_masks(
__a , __a , __a , return_tensors="pt" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
_UpperCamelCase : int = self.get_image_processor()
_UpperCamelCase : str = SamProcessor(image_processor=__a )
_UpperCamelCase : List[Any] = self.prepare_image_inputs()
_UpperCamelCase : List[str] = image_processor(__a , return_tensors="pt" )["pixel_values"].numpy()
_UpperCamelCase : Optional[Any] = processor(images=__a , return_tensors="pt" )["pixel_values"].numpy()
_UpperCamelCase : int = image_processor(__a , return_tensors="tf" )["pixel_values"].numpy()
_UpperCamelCase : int = processor(images=__a , return_tensors="tf" )["pixel_values"].numpy()
self.assertTrue(np.allclose(__a , __a ) )
self.assertTrue(np.allclose(__a , __a ) )
self.assertTrue(np.allclose(__a , __a ) )
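# --- Added illustrative sketch (simplified; the real SamProcessor logic also
# handles padding and reshaped input sizes, omitted here) ---
# Conceptually, post_process_masks upsamples the low-resolution masks back to
# each image's original size:
def _toy_upscale_masks(masks, original_size):
    import torch.nn.functional as F

    return F.interpolate(masks, size=tuple(original_size), mode="bilinear", align_corners=False)

# _toy_upscale_masks(torch.ones(1, 3, 5, 5), (1764, 2646)).shape == (1, 3, 1764, 2646)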
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """Decode the ciphertext with the given key; return None as soon as a
    decoded character falls outside the valid character set."""
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Brute-force every three-letter lowercase key and keep the decodings
    made up entirely of valid characters."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        decoded = try_key(ciphertext, key)
        if decoded is not None:
            possibles.append(decoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only the candidates that contain the given common English word."""
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    """Return the sum of the ASCII values of the decrypted text."""
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
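# --- Added illustrative check (not part of the Project Euler solution) ---
# XOR is self-inverse, which is what makes the brute force above possible:
# applying the same key byte twice recovers the plaintext.
def _xor_roundtrip_demo() -> None:
    plain, key = ord("H"), ord("a")
    cipher = plain ^ key
    assert cipher ^ key == plain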
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer
MT5Tokenizer = T5Tokenizer
if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast
MT5TokenizerFast = T5TokenizerFast
lowerCamelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
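# --- Added illustrative sketch (simplified; not the real _LazyModule) ---
# The same deferred-import idea expressed with PEP 562 module __getattr__:
# def __getattr__(name):
#     if name in _import_structure["configuration_mt5"]:
#         from . import configuration_mt5
#         return getattr(configuration_mt5, name)
#     raise AttributeError(name)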
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedily select a maximum-size set of non-overlapping activities.

    Assumes the activities are already sorted by finish time.
    """
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider the rest of the activities
    for j in range(n):
        # If this activity starts at or after the finish time of the
        # previously selected activity, select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
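# --- Added illustrative sketch (helper name is made up) ---
# With the sample data above the output is "0,1,3,4,". The greedy step
# assumes activities are already sorted by finish time; unsorted input can
# be prepared first:
def _sort_by_finish(start: list[int], finish: list[int]) -> tuple[list[int], list[int]]:
    pairs = sorted(zip(start, finish), key=lambda pair: pair[1])
    sorted_start = [s for s, _ in pairs]
    sorted_finish = [f for _, f in pairs]
    return sorted_start, sorted_finish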
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = SwinConfig(
embed_dim=192 ,depths=(2, 2, 18, 2) ,num_heads=(6, 12, 24, 48) ,window_size=12 ,out_features=["stage2", "stage3", "stage4"] ,)
_UpperCamelCase : List[Any] = DetaConfig(
backbone_config=lowercase_ ,num_queries=900 ,encoder_ffn_dim=2_048 ,decoder_ffn_dim=2_048 ,num_feature_levels=5 ,assign_first_stage=lowercase_ ,with_box_refine=lowercase_ ,two_stage=lowercase_ ,)
# set labels
_UpperCamelCase : int = "huggingface/label-files"
if "o365" in model_name:
_UpperCamelCase : Optional[Any] = 366
_UpperCamelCase : List[str] = "object365-id2label.json"
else:
_UpperCamelCase : List[str] = 91
_UpperCamelCase : List[str] = "coco-detection-id2label.json"
_UpperCamelCase : Union[str, Any] = num_labels
_UpperCamelCase : Optional[Any] = json.load(open(cached_download(hf_hub_url(lowercase_ ,lowercase_ ,repo_type="dataset" ) ) ,"r" ) )
    _UpperCamelCase : Any = {int(k ): v for k, v in idalabel.items()}
_UpperCamelCase : Tuple = idalabel
_UpperCamelCase : List[Any] = {v: k for k, v in idalabel.items()}
return config
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : List[str] = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.reduction.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.bias''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', F'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', F'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', F'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', F'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', F'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', F'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.weight''', F'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.weight''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.weight''', F'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.bias''', F'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : List[Any] = dct.pop(lowercase_ )
_UpperCamelCase : Optional[Any] = val
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Dict = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_UpperCamelCase : List[str] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_UpperCamelCase : Optional[Any] = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
_UpperCamelCase : Optional[int] = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase : int = in_proj_weight[:dim, :]
_UpperCamelCase : Any = in_proj_bias[: dim]
_UpperCamelCase : List[str] = in_proj_weight[
dim : dim * 2, :
]
_UpperCamelCase : Union[str, Any] = in_proj_bias[
dim : dim * 2
]
_UpperCamelCase : List[Any] = in_proj_weight[
-dim :, :
]
_UpperCamelCase : Optional[int] = in_proj_bias[-dim :]
# fmt: on
def lowercase__ ( lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
_UpperCamelCase : str = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_UpperCamelCase : Optional[int] = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase : List[str] = in_proj_weight[:hidden_size, :]
_UpperCamelCase : Dict = in_proj_bias[:hidden_size]
_UpperCamelCase : Dict = in_proj_weight[
hidden_size : hidden_size * 2, :
]
_UpperCamelCase : Any = in_proj_bias[hidden_size : hidden_size * 2]
_UpperCamelCase : int = in_proj_weight[-hidden_size:, :]
_UpperCamelCase : int = in_proj_bias[-hidden_size:]
def lowercase__ ( ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCamelCase : int = Image.open(requests.get(lowercase_ ,stream=lowercase_ ).raw )
return im
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_deta_config(lowercase_ )
# load original state dict
if model_name == "deta-swin-large":
_UpperCamelCase : Optional[int] = hf_hub_download(repo_id="nielsr/deta-checkpoints" ,filename="adet_swin_ft.pth" )
elif model_name == "deta-swin-large-o365":
_UpperCamelCase : Any = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" ,filename="deta_swin_pt_o365.pth" )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
_UpperCamelCase : Dict = torch.load(lowercase_ ,map_location="cpu" )["model"]
# original state dict
for name, param in state_dict.items():
        print(name ,param.shape )
# rename keys
_UpperCamelCase : Dict = create_rename_keys(lowercase_ )
for src, dest in rename_keys:
rename_key(lowercase_ ,lowercase_ ,lowercase_ )
read_in_swin_q_k_v(lowercase_ ,config.backbone_config )
read_in_decoder_q_k_v(lowercase_ ,lowercase_ )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
_UpperCamelCase : Optional[int] = state_dict.pop(lowercase_ )
_UpperCamelCase : Tuple = val
if "input_proj" in key:
_UpperCamelCase : Union[str, Any] = state_dict.pop(lowercase_ )
_UpperCamelCase : Union[str, Any] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
_UpperCamelCase : List[Any] = state_dict.pop(lowercase_ )
_UpperCamelCase : Optional[Any] = val
# finally, create HuggingFace model and load state dict
_UpperCamelCase : Union[str, Any] = DetaForObjectDetection(lowercase_ )
model.load_state_dict(lowercase_ )
model.eval()
_UpperCamelCase : List[str] = "cuda" if torch.cuda.is_available() else "cpu"
model.to(lowercase_ )
# load image processor
_UpperCamelCase : int = DetaImageProcessor(format="coco_detection" )
# verify our conversion on image
_UpperCamelCase : Optional[int] = prepare_img()
_UpperCamelCase : Dict = processor(images=lowercase_ ,return_tensors="pt" )
_UpperCamelCase : Optional[Any] = encoding["pixel_values"]
_UpperCamelCase : Any = model(pixel_values.to(lowercase_ ) )
# verify logits
print("Logits:" ,outputs.logits[0, :3, :3] )
print("Boxes:" ,outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_UpperCamelCase : Tuple = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
_UpperCamelCase : List[Any] = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
_UpperCamelCase : Dict = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
_UpperCamelCase : int = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] ,expected_logits.to(lowercase_ ) ,atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] ,expected_boxes.to(lowercase_ ) ,atol=1e-4 )
print("Everything ok!" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
# Push to hub
if push_to_hub:
print("Pushing model and processor to hub..." )
model.push_to_hub(F'''jozhang97/{model_name}''' )
processor.push_to_hub(F'''jozhang97/{model_name}''' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
lowerCamelCase__ = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
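# --- Added illustrative sketch (not part of the conversion script) ---
# The read_in_*_q_k_v helpers above split a fused in-projection matrix into
# separate query/key/value tensors; the core slicing is:
def _split_qkv(in_proj_weight, dim):
    query = in_proj_weight[:dim, :]
    key = in_proj_weight[dim : dim * 2, :]
    value = in_proj_weight[-dim:, :]
    return query, key, value

# _split_qkv(torch.randn(3 * 192, 192), 192) -> three (192, 192) tensors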
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :torch.FloatTensor
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Dict=3 , __a : Any=3 , __a : Union[str, Any]=("DownEncoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Tuple=32 , __a : int="silu" , __a : str=True , ) -> Dict:
super().__init__()
_UpperCamelCase : List[str] = layers_per_block
        _UpperCamelCase : Dict = torch.nn.Conv2d(
__a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : int = None
_UpperCamelCase : Any = nn.ModuleList([] )
# down
_UpperCamelCase : List[str] = block_out_channels[0]
for i, down_block_type in enumerate(__a ):
_UpperCamelCase : Tuple = output_channel
_UpperCamelCase : int = block_out_channels[i]
_UpperCamelCase : int = i == len(__a ) - 1
_UpperCamelCase : Dict = get_down_block(
__a , num_layers=self.layers_per_block , in_channels=__a , out_channels=__a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , )
self.down_blocks.append(__a )
# mid
        _UpperCamelCase : Union[str, Any] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# out
_UpperCamelCase : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : Any = nn.SiLU()
_UpperCamelCase : Union[str, Any] = 2 * out_channels if double_z else out_channels
        _UpperCamelCase : Tuple = nn.Conv2d(block_out_channels[-1] , __a , 3 , padding=1 )
_UpperCamelCase : Optional[int] = False
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Dict ) -> List[str]:
_UpperCamelCase : int = x
_UpperCamelCase : Optional[int] = self.conv_in(__a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Tuple ):
def custom_forward(*__a : Any ):
return module(*__a )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , use_reentrant=__a )
# middle
_UpperCamelCase : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , use_reentrant=__a )
else:
for down_block in self.down_blocks:
_UpperCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a )
# middle
_UpperCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __a )
else:
# down
for down_block in self.down_blocks:
_UpperCamelCase : int = down_block(__a )
# middle
_UpperCamelCase : int = self.mid_block(__a )
# post-process
_UpperCamelCase : Any = self.conv_norm_out(__a )
_UpperCamelCase : Any = self.conv_act(__a )
_UpperCamelCase : Optional[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : int=3 , __a : Any=3 , __a : str=("UpDecoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Optional[int]=32 , __a : Tuple="silu" , __a : Union[str, Any]="group" , ) -> str:
super().__init__()
_UpperCamelCase : List[Any] = layers_per_block
        _UpperCamelCase : Tuple = nn.Conv2d(
__a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = nn.ModuleList([] )
_UpperCamelCase : List[Any] = in_channels if norm_type == "spatial" else None
# mid
        _UpperCamelCase : Optional[Any] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# up
_UpperCamelCase : List[str] = list(reversed(__a ) )
_UpperCamelCase : int = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__a ):
_UpperCamelCase : int = output_channel
_UpperCamelCase : Union[str, Any] = reversed_block_out_channels[i]
_UpperCamelCase : Optional[Any] = i == len(__a ) - 1
_UpperCamelCase : Union[str, Any] = get_up_block(
__a , num_layers=self.layers_per_block + 1 , in_channels=__a , out_channels=__a , prev_output_channel=__a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , resnet_time_scale_shift=__a , )
self.up_blocks.append(__a )
_UpperCamelCase : Optional[Any] = output_channel
# out
if norm_type == "spatial":
_UpperCamelCase : Optional[int] = SpatialNorm(block_out_channels[0] , __a )
else:
_UpperCamelCase : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : str = nn.SiLU()
        _UpperCamelCase : str = nn.Conv2d(block_out_channels[0] , __a , 3 , padding=1 )
_UpperCamelCase : Dict = False
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[Any] , __a : Union[str, Any]=None ) -> Tuple:
_UpperCamelCase : List[str] = z
_UpperCamelCase : Dict = self.conv_in(__a )
_UpperCamelCase : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Any ):
def custom_forward(*__a : Tuple ):
return module(*__a )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a , use_reentrant=__a )
_UpperCamelCase : Optional[int] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , __a , use_reentrant=__a )
else:
# middle
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a )
_UpperCamelCase : Union[str, Any] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a , __a )
else:
# middle
_UpperCamelCase : str = self.mid_block(__a , __a )
_UpperCamelCase : int = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : Any = up_block(__a , __a )
# post-process
if latent_embeds is None:
_UpperCamelCase : List[str] = self.conv_norm_out(__a )
else:
_UpperCamelCase : Optional[int] = self.conv_norm_out(__a , __a )
_UpperCamelCase : Tuple = self.conv_act(__a )
_UpperCamelCase : List[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple , __a : List[str] , __a : List[str] , __a : str=None , __a : Optional[int]="random" , __a : Any=False , __a : Optional[Any]=True ) -> List[Any]:
super().__init__()
_UpperCamelCase : Tuple = n_e
_UpperCamelCase : Tuple = vq_embed_dim
_UpperCamelCase : Union[str, Any] = beta
_UpperCamelCase : str = legacy
_UpperCamelCase : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_UpperCamelCase : Any = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
_UpperCamelCase : Dict = self.used.shape[0]
_UpperCamelCase : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_UpperCamelCase : Optional[int] = self.re_embed
_UpperCamelCase : Any = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
_UpperCamelCase : Union[str, Any] = n_e
_UpperCamelCase : List[str] = sane_index_shape
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : str = inds.shape
assert len(__a ) > 1
_UpperCamelCase : Union[str, Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[Any] = self.used.to(__a )
_UpperCamelCase : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
_UpperCamelCase : Optional[Any] = match.argmax(-1 )
_UpperCamelCase : Any = match.sum(2 ) < 1
if self.unknown_index == "random":
_UpperCamelCase : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_UpperCamelCase : Dict = self.unknown_index
return new.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[int] ) -> Optional[int]:
_UpperCamelCase : int = inds.shape
assert len(__a ) > 1
_UpperCamelCase : List[Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[int] = self.used.to(__a )
if self.re_embed > self.used.shape[0]: # extra token
_UpperCamelCase : int = 0 # simply set to zero
_UpperCamelCase : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __a )
return back.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : str ) -> Optional[int]:
# reshape z -> (batch, height, width, channel) and flatten
_UpperCamelCase : List[str] = z.permute(0 , 2 , 3 , 1 ).contiguous()
_UpperCamelCase : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_UpperCamelCase : Optional[int] = torch.argmin(torch.cdist(__a , self.embedding.weight ) , dim=1 )
_UpperCamelCase : int = self.embedding(__a ).view(z.shape )
_UpperCamelCase : str = None
_UpperCamelCase : Any = None
# compute loss for embedding
if not self.legacy:
_UpperCamelCase : List[str] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_UpperCamelCase : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_UpperCamelCase : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
_UpperCamelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_UpperCamelCase : Tuple = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_UpperCamelCase : Dict = self.remap_to_used(__a )
_UpperCamelCase : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_UpperCamelCase : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[str] , __a : str ) -> Any:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_UpperCamelCase : str = indices.reshape(shape[0] , -1 ) # add batch axis
_UpperCamelCase : str = self.unmap_to_all(__a )
_UpperCamelCase : int = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_UpperCamelCase : Optional[int] = self.embedding(__a )
if shape is not None:
_UpperCamelCase : Tuple = z_q.view(__a )
# reshape back to match original input shape
_UpperCamelCase : Tuple = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : List[str] , __a : Optional[Any]=False ) -> int:
_UpperCamelCase : Dict = parameters
_UpperCamelCase, _UpperCamelCase : str = torch.chunk(__a , 2 , dim=1 )
_UpperCamelCase : Tuple = torch.clamp(self.logvar , -30.0 , 20.0 )
_UpperCamelCase : Union[str, Any] = deterministic
_UpperCamelCase : Dict = torch.exp(0.5 * self.logvar )
_UpperCamelCase : Any = torch.exp(self.logvar )
if self.deterministic:
_UpperCamelCase : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
_UpperCamelCase : List[Any] = randn_tensor(
self.mean.shape , generator=__a , device=self.parameters.device , dtype=self.parameters.dtype )
_UpperCamelCase : List[Any] = self.mean + self.std * sample
return x
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[str]=None ) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Tuple , __a : List[str]=[1, 2, 3] ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
_UpperCamelCase : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
return self.mean
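# --- Added illustrative sketch (not part of the original module) ---
# The sampling above is the standard reparameterization trick: a draw from
# N(mean, std^2) is written as mean + std * eps with eps ~ N(0, I), so
# gradients can flow through mean and std.
def _reparameterize(mean: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(mean)
    return mean + std * eps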
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : List[str] = botoa.client("iam" )
_UpperCamelCase : Tuple = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=lowercase_ ,AssumeRolePolicyDocument=json.dumps(lowercase_ ,indent=2 ) )
_UpperCamelCase : List[str] = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sagemaker:*",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"ecr:BatchCheckLayerAvailability",
"ecr:GetAuthorizationToken",
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject",
],
"Resource": "*",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=lowercase_ ,PolicyName=F'''{role_name}_policy_permission''' ,PolicyDocument=json.dumps(lowercase_ ,indent=2 ) ,)
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
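# --- Added illustrative note (the CLI invocation below is an assumption,
# shown for orientation; the function above does the same through boto3) ---
#   aws iam create-role --role-name <role_name> \
#       --assume-role-policy-document file://trust_policy.json
#   aws iam put-role-policy --role-name <role_name> \
#       --policy-name <role_name>_policy_permission \
#       --policy-document file://permissions_policy.json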
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : Dict = botoa.client("iam" )
return iam_client.get_role(RoleName=lowercase_ )["Role"]["Arn"]
def lowercase__ ( ) -> int:
"""simple docstring"""
_UpperCamelCase : str = _ask_options(
"How do you want to authorize?" ,["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] ,lowercase_ ,)
_UpperCamelCase : List[Any] = None
if credentials_configuration == 0:
_UpperCamelCase : int = _ask_field("Enter your AWS Profile name: [default] " ,default="default" )
_UpperCamelCase : Optional[int] = aws_profile
else:
print(
"Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
"`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
_UpperCamelCase : Union[str, Any] = _ask_field("AWS Access Key ID: " )
_UpperCamelCase : Any = aws_access_key_id
_UpperCamelCase : Any = _ask_field("AWS Secret Access Key: " )
_UpperCamelCase : Tuple = aws_secret_access_key
_UpperCamelCase : Union[str, Any] = _ask_field("Enter your AWS Region: [us-east-1]" ,default="us-east-1" )
_UpperCamelCase : str = aws_region
_UpperCamelCase : Optional[Any] = _ask_options(
"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" ,["Provide IAM Role name", "Create new IAM role using credentials"] ,lowercase_ ,)
if role_management == 0:
_UpperCamelCase : List[Any] = _ask_field("Enter your IAM role name: " )
else:
_UpperCamelCase : str = "accelerate_sagemaker_execution_role"
print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(lowercase_ )
_UpperCamelCase : Union[str, Any] = _ask_field(
"Do you want to use custom Docker image? [yes/NO]: " ,_convert_yes_no_to_bool ,default=lowercase_ ,error_message="Please enter yes or no." ,)
_UpperCamelCase : Optional[int] = None
if is_custom_docker_image:
_UpperCamelCase : Optional[int] = _ask_field("Enter your Docker image: " ,lambda lowercase_ : str(lowercase_ ).lower() )
_UpperCamelCase : List[str] = _ask_field(
"Do you want to provide SageMaker input channels with data locations? [yes/NO]: " ,_convert_yes_no_to_bool ,default=lowercase_ ,error_message="Please enter yes or no." ,)
_UpperCamelCase : Optional[int] = None
if is_sagemaker_inputs_enabled:
_UpperCamelCase : List[str] = _ask_field(
"Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " ,lambda lowercase_ : str(lowercase_ ).lower() ,)
_UpperCamelCase : List[Any] = _ask_field(
"Do you want to enable SageMaker metrics? [yes/NO]: " ,_convert_yes_no_to_bool ,default=lowercase_ ,error_message="Please enter yes or no." ,)
_UpperCamelCase : Union[str, Any] = None
if is_sagemaker_metrics_enabled:
_UpperCamelCase : Tuple = _ask_field(
"Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " ,lambda lowercase_ : str(lowercase_ ).lower() ,)
_UpperCamelCase : str = _ask_options(
"What is the distributed mode?" ,["No distributed training", "Data parallelism"] ,_convert_sagemaker_distributed_mode ,)
_UpperCamelCase : str = {}
_UpperCamelCase : List[str] = _ask_field(
"Do you wish to optimize your script with torch dynamo?[yes/NO]:" ,_convert_yes_no_to_bool ,default=lowercase_ ,error_message="Please enter yes or no." ,)
if use_dynamo:
_UpperCamelCase : Optional[Any] = "dynamo_"
_UpperCamelCase : Tuple = _ask_options(
"Which dynamo backend would you like to use?" ,[x.lower() for x in DYNAMO_BACKENDS] ,_convert_dynamo_backend ,default=2 ,)
_UpperCamelCase : int = _ask_field(
"Do you want to customize the defaults sent to torch.compile? [yes/NO]: " ,_convert_yes_no_to_bool ,default=lowercase_ ,error_message="Please enter yes or no." ,)
if use_custom_options:
_UpperCamelCase : int = _ask_options(
"Which mode do you want to use?" ,lowercase_ ,lambda lowercase_ : TORCH_DYNAMO_MODES[int(lowercase_ )] ,default="default" ,)
_UpperCamelCase : Optional[Any] = _ask_field(
"Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " ,_convert_yes_no_to_bool ,default=lowercase_ ,error_message="Please enter yes or no." ,)
_UpperCamelCase : Union[str, Any] = _ask_field(
"Do you want to enable dynamic shape tracing? [yes/NO]: " ,_convert_yes_no_to_bool ,default=lowercase_ ,error_message="Please enter yes or no." ,)
_UpperCamelCase : Any = "Which EC2 instance type you want to use for your training?"
if distributed_type != SageMakerDistributedType.NO:
_UpperCamelCase : Union[str, Any] = _ask_options(
lowercase_ ,lowercase_ ,lambda lowercase_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(lowercase_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
_UpperCamelCase : Any = _ask_field(lowercase_ ,lambda lowercase_ : str(lowercase_ ).lower() ,default="ml.p3.2xlarge" )
_UpperCamelCase : Optional[int] = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
_UpperCamelCase : Any = _ask_field(
"How many machines do you want use? [1]: " ,lowercase_ ,default=1 ,)
_UpperCamelCase : List[Any] = _ask_options(
"Do you wish to use FP16 or BF16 (mixed precision)?" ,["no", "fp16", "bf16", "fp8"] ,_convert_mixed_precision ,)
if use_dynamo and mixed_precision == "no":
print(
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
return SageMakerConfig(
image_uri=lowercase_ ,compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER ,distributed_type=lowercase_ ,use_cpu=lowercase_ ,dynamo_config=lowercase_ ,eca_instance_type=lowercase_ ,profile=lowercase_ ,region=lowercase_ ,iam_role_name=lowercase_ ,mixed_precision=lowercase_ ,num_machines=lowercase_ ,sagemaker_inputs_file=lowercase_ ,sagemaker_metrics_file=lowercase_ ,)
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"text": Value("string" )} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"summary": Value("string" )} )
SCREAMING_SNAKE_CASE__ :str = "text"
SCREAMING_SNAKE_CASE__ :str = "summary"
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = set()
# edges = list of graph's edges
_UpperCamelCase : Union[str, Any] = get_edges(lowercase_ )
    # While there are still edges left, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove every edge adjacent to from_node or to_node
while edges:
_UpperCamelCase, _UpperCamelCase : str = edges.pop()
chosen_vertices.add(lowercase_ )
chosen_vertices.add(lowercase_ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(lowercase_ )
return chosen_vertices
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : List[str] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase__ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, float]:
"""simple docstring"""
_UpperCamelCase : str = len([g for position, g in enumerate(lowercase_ ) if g == main_target[position]] )
return (item, float(lowercase_ ))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, str]:
"""simple docstring"""
_UpperCamelCase : Tuple = random.randint(0 ,len(lowercase_ ) - 1 )
_UpperCamelCase : Dict = parent_a[:random_slice] + parent_a[random_slice:]
_UpperCamelCase : Tuple = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : int = list(lowercase_ )
if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
_UpperCamelCase : int = random.choice(lowercase_ )
return "".join(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,) -> list[str]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = []
# Generate more children proportionally to the fitness score.
_UpperCamelCase : List[str] = int(parent_a[1] * 100 ) + 1
_UpperCamelCase : Union[str, Any] = 10 if child_n >= 10 else child_n
for _ in range(lowercase_ ):
_UpperCamelCase : Dict = population_score[random.randint(0 ,lowercase_ )][0]
_UpperCamelCase, _UpperCamelCase : Dict = crossover(parent_a[0] ,lowercase_ )
# Append new string to the population list.
pop.append(mutate(lowercase_ ,lowercase_ ) )
pop.append(mutate(lowercase_ ,lowercase_ ) )
return pop
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = True ) -> tuple[int, int, str]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
_UpperCamelCase : List[str] = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(lowercase_ )
    # Verify that the target contains no genes besides the ones inside the genes variable.
_UpperCamelCase : int = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_UpperCamelCase : int = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(lowercase_ )
# Generate random starting population.
_UpperCamelCase : Union[str, Any] = []
for _ in range(lowercase_ ):
population.append("".join([random.choice(lowercase_ ) for i in range(len(lowercase_ ) )] ) )
    # Just some logs to know what the algorithm is doing.
_UpperCamelCase, _UpperCamelCase : str = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowercase_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_UpperCamelCase : int = [evaluate(lowercase_ ,lowercase_ ) for item in population]
# Check if there is a matching evolution.
_UpperCamelCase : Optional[Any] = sorted(lowercase_ ,key=lambda lowercase_ : x[1] ,reverse=lowercase_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regression of the evolution.
_UpperCamelCase : str = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowercase_ )
# Normalize population score to be between 0 and 1.
_UpperCamelCase : str = [
(item, score / len(lowercase_ )) for item, score in population_score
]
        # This is the selection step
for i in range(lowercase_ ):
population.extend(select(population_score[int(lowercase_ )] ,lowercase_ ,lowercase_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(lowercase_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
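# A tiny standalone illustration of the operators defined above (readable
# names are assumptions; the file itself defines them under obfuscated ones):
# fitness counts matching positions, and single-point crossover conserves the
# combined gene multiset of the two parents.
def _fitness_sketch(item, target):
    # Fitness is the number of positions where the candidate matches the target.
    return sum(1 for position, gene in enumerate(item) if gene == target[position])
assert _fitness_sketch("hallo", "hello") == 4  # only one position differs
_cut = 2  # any fixed cut point illustrates the conservation property
_parent_a, _parent_b = "aaaaa", "bbbbb"
_child_a = _parent_a[:_cut] + _parent_b[_cut:]
_child_b = _parent_b[:_cut] + _parent_a[_cut:]
assert sorted(_child_a + _child_b) == sorted(_parent_a + _parent_b)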
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["OwlViTFeatureExtractor"]
lowerCamelCase__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
_UpperCamelCase : int = inspect.getfile(accelerate.test_utils )
_UpperCamelCase : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
_UpperCamelCase : str = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
_UpperCamelCase : int = F'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
_UpperCamelCase : Optional[int] = [sys.executable] + distributed_args
execute_subprocess_async(__a , env=os.environ.copy() )
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowercase__ ( lowercase_ = 1_000_000 ,lowercase_ = 10 ) -> int:
"""simple docstring"""
_UpperCamelCase : defaultdict = defaultdict(lowercase_ )
for outer_width in range(3 ,(t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_UpperCamelCase : Any = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
else:
_UpperCamelCase : str = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(lowercase_ ,outer_width - 1 ,2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = "nat"
SCREAMING_SNAKE_CASE__ :Tuple = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Dict , __a : str=4 , __a : Optional[int]=3 , __a : Optional[int]=64 , __a : Any=[3, 4, 6, 5] , __a : Optional[int]=[2, 4, 8, 16] , __a : List[Any]=7 , __a : Optional[Any]=3.0 , __a : Any=True , __a : int=0.0 , __a : Optional[Any]=0.0 , __a : Any=0.1 , __a : Dict="gelu" , __a : List[str]=0.02 , __a : int=1e-5 , __a : int=0.0 , __a : str=None , __a : List[Any]=None , **__a : Dict , ) -> List[str]:
super().__init__(**__a )
_UpperCamelCase : str = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : List[str] = embed_dim
_UpperCamelCase : int = depths
_UpperCamelCase : Any = len(__a )
_UpperCamelCase : str = num_heads
_UpperCamelCase : str = kernel_size
_UpperCamelCase : Any = mlp_ratio
_UpperCamelCase : str = qkv_bias
_UpperCamelCase : Union[str, Any] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = drop_path_rate
_UpperCamelCase : Union[str, Any] = hidden_act
_UpperCamelCase : Union[str, Any] = layer_norm_eps
_UpperCamelCase : Tuple = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCamelCase : Tuple = int(embed_dim * 2 ** (len(__a ) - 1) )
_UpperCamelCase : Union[str, Any] = layer_scale_init_value
_UpperCamelCase : List[Any] = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(__a ) + 1 )]
_UpperCamelCase, _UpperCamelCase : List[Any] = get_aligned_output_features_output_indices(
out_features=__a , out_indices=__a , stage_names=self.stage_names )
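# A worked instance of the hidden_size rule above: with the default
# embed_dim=64 and four stages (len(depths) == 4), the channel dimension
# after the last stage is int(64 * 2 ** (4 - 1)) == 512.
assert int(64 * 2 ** (4 - 1)) == 512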
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("KEY")
lowerCamelCase__ = TypeVar("VAL")
@dataclass(frozen=_UpperCamelCase , slots=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( Generic[KEY, VAL] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :KEY
SCREAMING_SNAKE_CASE__ :VAL
class __SCREAMING_SNAKE_CASE ( _Item ):
'''simple docstring'''
    def __init__( self : List[str] ) -> None:
        super().__init__(None , None )
def __bool__( self : Dict ) -> bool:
return False
lowerCamelCase__ = _DeletedItem()
class __SCREAMING_SNAKE_CASE ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self : int , __a : int = 8 , __a : float = 0.75 ) -> None:
_UpperCamelCase : str = initial_block_size
_UpperCamelCase : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_UpperCamelCase : List[str] = capacity_factor
_UpperCamelCase : Dict = 0
def __SCREAMING_SNAKE_CASE ( self : int , __a : KEY ) -> int:
return hash(__a ) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : int ) -> int:
return (ind + 1) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : int , __a : KEY , __a : VAL ) -> bool:
_UpperCamelCase : List[Any] = self._buckets[ind]
if not stored:
_UpperCamelCase : Tuple = _Item(__a , __a )
self._len += 1
return True
elif stored.key == key:
_UpperCamelCase : Union[str, Any] = _Item(__a , __a )
return True
else:
return False
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
_UpperCamelCase : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
_UpperCamelCase : List[str] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : int ) -> None:
_UpperCamelCase : Any = self._buckets
_UpperCamelCase : List[Any] = [None] * new_size
_UpperCamelCase : List[str] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __SCREAMING_SNAKE_CASE ( self : int ) -> None:
self._resize(len(self._buckets ) * 2 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None:
self._resize(len(self._buckets ) // 2 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : KEY ) -> Iterator[int]:
_UpperCamelCase : str = self._get_bucket_index(__a )
for _ in range(len(self._buckets ) ):
yield ind
_UpperCamelCase : Tuple = self._get_next_ind(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : KEY , __a : VAL ) -> None:
for ind in self._iterate_buckets(__a ):
if self._try_set(__a , __a , __a ):
break
def __setitem__( self : int , __a : KEY , __a : VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(__a , __a )
def __delitem__( self : str , __a : KEY ) -> None:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
raise KeyError(__a )
if item is _deleted:
continue
if item.key == key:
_UpperCamelCase : List[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , __a : KEY ) -> VAL:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__a )
def __len__( self : List[Any] ) -> int:
return self._len
def __iter__( self : List[str] ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self : List[str] ) -> str:
_UpperCamelCase : Optional[int] = " ,".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
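# A compact standalone sketch of the linear-probing scheme implemented above
# (names are illustrative; the classes themselves are name-obfuscated):
# probing starts at hash(key) % capacity and steps one bucket at a time,
# wrapping around, so every bucket is visited exactly once before giving up.
def _probe_sequence(key, n_buckets):
    ind = hash(key) % n_buckets
    for _ in range(n_buckets):
        yield ind
        ind = (ind + 1) % n_buckets
assert len(set(_probe_sequence("key", 8))) == 8  # all 8 buckets, no repeats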
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :torch.FloatTensor
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Dict=3 , __a : Any=3 , __a : Union[str, Any]=("DownEncoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Tuple=32 , __a : int="silu" , __a : str=True , ) -> Dict:
super().__init__()
_UpperCamelCase : List[str] = layers_per_block
_UpperCamelCase : Dict = torch.nn.Convad(
__a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : int = None
_UpperCamelCase : Any = nn.ModuleList([] )
# down
_UpperCamelCase : List[str] = block_out_channels[0]
for i, down_block_type in enumerate(__a ):
_UpperCamelCase : Tuple = output_channel
_UpperCamelCase : int = block_out_channels[i]
_UpperCamelCase : int = i == len(__a ) - 1
_UpperCamelCase : Dict = get_down_block(
__a , num_layers=self.layers_per_block , in_channels=__a , out_channels=__a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , )
self.down_blocks.append(__a )
# mid
_UpperCamelCase : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# out
_UpperCamelCase : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : Any = nn.SiLU()
_UpperCamelCase : Union[str, Any] = 2 * out_channels if double_z else out_channels
_UpperCamelCase : Tuple = nn.Convad(block_out_channels[-1] , __a , 3 , padding=1 )
_UpperCamelCase : Optional[int] = False
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Dict ) -> List[str]:
_UpperCamelCase : int = x
_UpperCamelCase : Optional[int] = self.conv_in(__a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Tuple ):
def custom_forward(*__a : Any ):
return module(*__a )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , use_reentrant=__a )
# middle
_UpperCamelCase : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , use_reentrant=__a )
else:
for down_block in self.down_blocks:
_UpperCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a )
# middle
_UpperCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __a )
else:
# down
for down_block in self.down_blocks:
_UpperCamelCase : int = down_block(__a )
# middle
_UpperCamelCase : int = self.mid_block(__a )
# post-process
_UpperCamelCase : Any = self.conv_norm_out(__a )
_UpperCamelCase : Any = self.conv_act(__a )
_UpperCamelCase : Optional[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : int=3 , __a : Any=3 , __a : str=("UpDecoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Optional[int]=32 , __a : Tuple="silu" , __a : Union[str, Any]="group" , ) -> str:
super().__init__()
_UpperCamelCase : List[Any] = layers_per_block
_UpperCamelCase : Tuple = nn.Convad(
__a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = nn.ModuleList([] )
_UpperCamelCase : List[Any] = in_channels if norm_type == "spatial" else None
# mid
_UpperCamelCase : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# up
_UpperCamelCase : List[str] = list(reversed(__a ) )
_UpperCamelCase : int = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__a ):
_UpperCamelCase : int = output_channel
_UpperCamelCase : Union[str, Any] = reversed_block_out_channels[i]
_UpperCamelCase : Optional[Any] = i == len(__a ) - 1
_UpperCamelCase : Union[str, Any] = get_up_block(
__a , num_layers=self.layers_per_block + 1 , in_channels=__a , out_channels=__a , prev_output_channel=__a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , resnet_time_scale_shift=__a , )
self.up_blocks.append(__a )
_UpperCamelCase : Optional[Any] = output_channel
# out
if norm_type == "spatial":
_UpperCamelCase : Optional[int] = SpatialNorm(block_out_channels[0] , __a )
else:
_UpperCamelCase : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : str = nn.SiLU()
_UpperCamelCase : str = nn.Convad(block_out_channels[0] , __a , 3 , padding=1 )
_UpperCamelCase : Dict = False
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[Any] , __a : Union[str, Any]=None ) -> Tuple:
_UpperCamelCase : List[str] = z
_UpperCamelCase : Dict = self.conv_in(__a )
_UpperCamelCase : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Any ):
def custom_forward(*__a : Tuple ):
return module(*__a )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a , use_reentrant=__a )
_UpperCamelCase : Optional[int] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , __a , use_reentrant=__a )
else:
# middle
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a )
_UpperCamelCase : Union[str, Any] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a , __a )
else:
# middle
_UpperCamelCase : str = self.mid_block(__a , __a )
_UpperCamelCase : int = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : Any = up_block(__a , __a )
# post-process
if latent_embeds is None:
_UpperCamelCase : List[str] = self.conv_norm_out(__a )
else:
_UpperCamelCase : Optional[int] = self.conv_norm_out(__a , __a )
_UpperCamelCase : Tuple = self.conv_act(__a )
_UpperCamelCase : List[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple , __a : List[str] , __a : List[str] , __a : str=None , __a : Optional[int]="random" , __a : Any=False , __a : Optional[Any]=True ) -> List[Any]:
super().__init__()
_UpperCamelCase : Tuple = n_e
_UpperCamelCase : Tuple = vq_embed_dim
_UpperCamelCase : Union[str, Any] = beta
_UpperCamelCase : str = legacy
_UpperCamelCase : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_UpperCamelCase : Any = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
_UpperCamelCase : Dict = self.used.shape[0]
_UpperCamelCase : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_UpperCamelCase : Optional[int] = self.re_embed
_UpperCamelCase : Any = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
_UpperCamelCase : Union[str, Any] = n_e
_UpperCamelCase : List[str] = sane_index_shape
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : str = inds.shape
assert len(__a ) > 1
_UpperCamelCase : Union[str, Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[Any] = self.used.to(__a )
_UpperCamelCase : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
_UpperCamelCase : Optional[Any] = match.argmax(-1 )
_UpperCamelCase : Any = match.sum(2 ) < 1
if self.unknown_index == "random":
_UpperCamelCase : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_UpperCamelCase : Dict = self.unknown_index
return new.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[int] ) -> Optional[int]:
_UpperCamelCase : int = inds.shape
assert len(__a ) > 1
_UpperCamelCase : List[Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[int] = self.used.to(__a )
if self.re_embed > self.used.shape[0]: # extra token
_UpperCamelCase : int = 0 # simply set to zero
_UpperCamelCase : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __a )
return back.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : str ) -> Optional[int]:
# reshape z -> (batch, height, width, channel) and flatten
_UpperCamelCase : List[str] = z.permute(0 , 2 , 3 , 1 ).contiguous()
_UpperCamelCase : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_UpperCamelCase : Optional[int] = torch.argmin(torch.cdist(__a , self.embedding.weight ) , dim=1 )
_UpperCamelCase : int = self.embedding(__a ).view(z.shape )
_UpperCamelCase : str = None
_UpperCamelCase : Any = None
# compute loss for embedding
if not self.legacy:
_UpperCamelCase : List[str] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_UpperCamelCase : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_UpperCamelCase : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
_UpperCamelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_UpperCamelCase : Tuple = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_UpperCamelCase : Dict = self.remap_to_used(__a )
_UpperCamelCase : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_UpperCamelCase : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[str] , __a : str ) -> Any:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_UpperCamelCase : str = indices.reshape(shape[0] , -1 ) # add batch axis
_UpperCamelCase : str = self.unmap_to_all(__a )
_UpperCamelCase : int = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_UpperCamelCase : Optional[int] = self.embedding(__a )
if shape is not None:
_UpperCamelCase : Tuple = z_q.view(__a )
# reshape back to match original input shape
_UpperCamelCase : Tuple = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : List[str] , __a : Optional[Any]=False ) -> int:
_UpperCamelCase : Dict = parameters
_UpperCamelCase, _UpperCamelCase : str = torch.chunk(__a , 2 , dim=1 )
_UpperCamelCase : Tuple = torch.clamp(self.logvar , -30.0 , 20.0 )
_UpperCamelCase : Union[str, Any] = deterministic
_UpperCamelCase : Dict = torch.exp(0.5 * self.logvar )
_UpperCamelCase : Any = torch.exp(self.logvar )
if self.deterministic:
_UpperCamelCase : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
_UpperCamelCase : List[Any] = randn_tensor(
self.mean.shape , generator=__a , device=self.parameters.device , dtype=self.parameters.dtype )
_UpperCamelCase : List[Any] = self.mean + self.std * sample
return x
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[str]=None ) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Tuple , __a : List[str]=[1, 2, 3] ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
_UpperCamelCase : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
return self.mean
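# A plain-float sanity check of the closed-form KL divergence used above:
# KL(N(mu, var) || N(0, 1)) = 0.5 * (mu**2 + var - 1 - log(var)), which
# vanishes exactly when mu = 0 and var = 1.
import math
_mu, _var = 0.0, 1.0
assert 0.5 * (_mu**2 + _var - 1.0 - math.log(_var)) == 0.0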
"""simple docstring"""
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any] , __a : list[int] ) -> None:
_UpperCamelCase : Tuple = len(__a )
_UpperCamelCase : Dict = [0] * len_array
if len_array > 0:
_UpperCamelCase : Optional[Any] = array[0]
for i in range(1 , __a ):
_UpperCamelCase : Tuple = self.prefix_sum[i - 1] + array[i]
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : int , __a : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int ) -> bool:
_UpperCamelCase : int = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__a )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
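# A worked, standalone example of the two queries the class above supports
# (values only; the class itself is name-obfuscated):
_arr = [1, 2, 3, 4]
_prefix = [1, 3, 6, 10]  # _prefix[i] = _arr[0] + ... + _arr[i]
# Range sum over indices [1, 3]: _prefix[3] - _prefix[0] = 10 - 1 = 9.
assert _prefix[3] - _prefix[0] == sum(_arr[1:4])
# Some subarray sums to 7 exactly when two running sums differ by 7; here
# _prefix[3] - _prefix[1] = 10 - 3 = 7, i.e. the subarray _arr[2:4].
assert _prefix[3] - _prefix[1] == 7 == sum(_arr[2:4])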
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
"configuration_autoformer": [
"AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AutoformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"AutoformerForPrediction",
"AutoformerModel",
"AutoformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def lowercase__ ( lowercase_ ,lowercase_=7 ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[int] = None
if token is not None:
_UpperCamelCase : Optional[Any] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
_UpperCamelCase : Any = "636036"
_UpperCamelCase : Tuple = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
_UpperCamelCase : Dict = requests.get(lowercase_ ,headers=lowercase_ ).json()
return result["workflow_runs"]
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_daily_ci_runs(lowercase_ )
_UpperCamelCase : Tuple = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
_UpperCamelCase : Union[str, Any] = workflow_run["id"]
break
return workflow_run_id
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : str = get_last_daily_ci_runs(lowercase_ )
if workflow_run_id is not None:
_UpperCamelCase : int = get_artifacts_links(worflow_run_id=lowercase_ ,token=lowercase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
_UpperCamelCase : Dict = artifacts_links[artifact_name]
download_artifact(
artifact_name=lowercase_ ,artifact_url=lowercase_ ,output_dir=lowercase_ ,token=lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
get_last_daily_ci_artifacts(lowercase_ ,lowercase_ ,lowercase_ )
_UpperCamelCase : Dict = {}
for artifact_name in artifact_names:
_UpperCamelCase : Union[str, Any] = os.path.join(lowercase_ ,F'''{artifact_name}.zip''' )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : int = {}
with zipfile.ZipFile(lowercase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase_ ):
# read the file
with z.open(lowercase_ ) as f:
_UpperCamelCase : int = f.read().decode("UTF-8" )
return results
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCamelCase__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import math
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Any , __a : list[list[float]] , __a : list[int] ) -> int:
_UpperCamelCase : List[Any] = 0.0
_UpperCamelCase : Union[str, Any] = 0.0
for i in range(len(__a ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
            return 0 if da > db else 1
return 0
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : list[list[int | float]] , __a : list[int] , __a : int , __a : float ) -> list[list[int | float]]:
for i in range(len(__a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def lowercase__ ( ) -> None:
"""simple docstring"""
_UpperCamelCase : Optional[int] = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_UpperCamelCase : List[str] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_UpperCamelCase : List[Any] = SelfOrganizingMap()
_UpperCamelCase : int = 3
_UpperCamelCase : List[Any] = 0.5
for _ in range(lowercase_ ):
for j in range(len(lowercase_ ) ):
# training sample
_UpperCamelCase : int = training_samples[j]
# Compute the winning vector
_UpperCamelCase : Tuple = self_organizing_map.get_winner(lowercase_ ,lowercase_ )
# Update the winning vector
_UpperCamelCase : int = self_organizing_map.update(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
# classify test sample
_UpperCamelCase : Optional[int] = [0, 0, 0, 1]
_UpperCamelCase : Dict = self_organizing_map.get_winner(lowercase_ ,lowercase_ )
# results
print(F'''Clusters that the test sample belongs to : {winner}''' )
print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
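# One worked step of the competitive-learning update applied above: the
# winning weight vector is pulled toward the sample by
# w[i] += alpha * (sample[i] - w[i]). The values below are exact in binary
# floating point, so the equality comparison is safe.
_w, _sample, _alpha = [0.5, 0.75], [1, 0], 0.5
_w_after = [wi + _alpha * (si - wi) for wi, si in zip(_w, _sample)]
assert _w_after == [0.75, 0.375]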
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = "MCTCTFeatureExtractor"
SCREAMING_SNAKE_CASE__ :Tuple = "AutoTokenizer"
def __init__( self : Tuple , __a : Tuple , __a : str ) -> str:
super().__init__(__a , __a )
_UpperCamelCase : Tuple = self.feature_extractor
_UpperCamelCase : Union[str, Any] = False
def __call__( self : int , *__a : Tuple , **__a : Dict ) -> Any:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__a , **__a )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
_UpperCamelCase : List[Any] = kwargs.pop("raw_speech" )
else:
_UpperCamelCase : str = kwargs.pop("audio" , __a )
_UpperCamelCase : Optional[int] = kwargs.pop("sampling_rate" , __a )
_UpperCamelCase : List[str] = kwargs.pop("text" , __a )
if len(__a ) > 0:
_UpperCamelCase : Dict = args[0]
_UpperCamelCase : Optional[int] = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
_UpperCamelCase : Optional[int] = self.feature_extractor(__a , *__a , sampling_rate=__a , **__a )
if text is not None:
_UpperCamelCase : Union[str, Any] = self.tokenizer(__a , **__a )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_UpperCamelCase : str = encodings["input_ids"]
return inputs
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , *__a : str , **__a : Any ) -> str:
return self.tokenizer.batch_decode(*__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , *__a : Optional[Any] , **__a : Optional[Any] ) -> Optional[int]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*__a , **__a )
_UpperCamelCase : List[Any] = kwargs.pop("input_features" , __a )
_UpperCamelCase : Dict = kwargs.pop("labels" , __a )
if len(__a ) > 0:
_UpperCamelCase : str = args[0]
_UpperCamelCase : Union[str, Any] = args[1:]
if input_features is not None:
_UpperCamelCase : Dict = self.feature_extractor.pad(__a , *__a , **__a )
if labels is not None:
_UpperCamelCase : List[str] = self.tokenizer.pad(__a , **__a )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_UpperCamelCase : int = labels["input_ids"]
return input_features
def __SCREAMING_SNAKE_CASE ( self : List[Any] , *__a : List[str] , **__a : List[Any] ) -> Any:
return self.tokenizer.decode(*__a , **__a )
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : List[str] = self.tokenizer
yield
_UpperCamelCase : Optional[int] = self.feature_extractor
_UpperCamelCase : int = False
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase__ = "src/transformers"
lowerCamelCase__ = "docs/source/en"
lowerCamelCase__ = "."
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
with open(lowercase_ ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
_UpperCamelCase : Union[str, Any] = f.readlines()
# Find the start prompt.
_UpperCamelCase : Dict = 0
while not lines[start_index].startswith(lowercase_ ):
start_index += 1
start_index += 1
_UpperCamelCase : Optional[int] = start_index
while not lines[end_index].startswith(lowercase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
lowerCamelCase__ = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase__ = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
lowerCamelCase__ = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Tuple = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" ,lowercase_ )
return [m.group(0 ) for m in matches]
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = 2 if text == "✅" or text == "❌" else len(lowercase_ )
_UpperCamelCase : Union[str, Any] = (width - text_length) // 2
_UpperCamelCase : Dict = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_UpperCamelCase : str = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_UpperCamelCase : Dict = {name: config.replace("Config" ,"" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : str = collections.defaultdict(lowercase_ )
# Let's lookup through all transformers object (once).
for attr_name in dir(lowercase_ ):
_UpperCamelCase : List[str] = None
if attr_name.endswith("Tokenizer" ):
_UpperCamelCase : Tuple = slow_tokenizers
_UpperCamelCase : Any = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
_UpperCamelCase : Optional[Any] = fast_tokenizers
_UpperCamelCase : List[str] = attr_name[:-13]
elif _re_tf_models.match(lowercase_ ) is not None:
_UpperCamelCase : List[Any] = tf_models
_UpperCamelCase : Dict = _re_tf_models.match(lowercase_ ).groups()[0]
elif _re_flax_models.match(lowercase_ ) is not None:
_UpperCamelCase : Dict = flax_models
_UpperCamelCase : Union[str, Any] = _re_flax_models.match(lowercase_ ).groups()[0]
elif _re_pt_models.match(lowercase_ ) is not None:
_UpperCamelCase : Optional[int] = pt_models
_UpperCamelCase : Any = _re_pt_models.match(lowercase_ ).groups()[0]
if lookup_dict is not None:
while len(lowercase_ ) > 0:
if attr_name in model_name_to_prefix.values():
_UpperCamelCase : Dict = True
break
# Try again after removing the last word in the name
_UpperCamelCase : List[str] = "".join(camel_case_split(lowercase_ )[:-1] )
# Let's build that table!
_UpperCamelCase : Any = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_UpperCamelCase : List[str] = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_UpperCamelCase : Union[str, Any] = [len(lowercase_ ) + 2 for c in columns]
_UpperCamelCase : Any = max([len(lowercase_ ) for name in model_names] ) + 2
# Build the table per se
_UpperCamelCase : Tuple = "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for c, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
_UpperCamelCase : Union[str, Any] = {True: "✅", False: "❌"}
for name in model_names:
_UpperCamelCase : Optional[int] = model_name_to_prefix[name]
_UpperCamelCase : Tuple = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for l, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
return table
def lowercase__ ( lowercase_=False ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : str = _find_text_in_file(
filename=os.path.join(lowercase_ ,"index.md" ) ,start_prompt="<!--This table is updated automatically from the auto modules" ,end_prompt="<!-- End table-->" ,)
_UpperCamelCase : Any = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowercase_ ,"index.md" ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCamelCase__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = 0
_UpperCamelCase : List[str] = len(lowercase_ )
for i in range(n - 1 ):
for j in range(i + 1 ,lowercase_ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
if len(lowercase_ ) <= 1:
return arr, 0
_UpperCamelCase : Union[str, Any] = len(lowercase_ ) // 2
_UpperCamelCase : Union[str, Any] = arr[0:mid]
_UpperCamelCase : Optional[int] = arr[mid:]
_UpperCamelCase, _UpperCamelCase : Dict = count_inversions_recursive(lowercase_ )
_UpperCamelCase, _UpperCamelCase : Optional[int] = count_inversions_recursive(lowercase_ )
_UpperCamelCase, _UpperCamelCase : str = _count_cross_inversions(lowercase_ ,lowercase_ )
_UpperCamelCase : Union[str, Any] = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def lowercase__ ( lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Tuple = []
_UpperCamelCase : str = 0
while i < len(lowercase_ ) and j < len(lowercase_ ):
if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(lowercase_ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(lowercase_ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def lowercase__ ( ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : Tuple = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
_UpperCamelCase : Tuple = count_inversions_bf(lowercase_ )
_UpperCamelCase, _UpperCamelCase : Dict = count_inversions_recursive(lowercase_ )
assert num_inversions_bf == num_inversions_recursive == 8
print("number of inversions = " ,lowercase_ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
_UpperCamelCase : Union[str, Any] = count_inversions_bf(lowercase_ )
_UpperCamelCase, _UpperCamelCase : Tuple = count_inversions_recursive(lowercase_ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " ,lowercase_ )
# an empty list should also have zero inversions
_UpperCamelCase : str = []
_UpperCamelCase : Dict = count_inversions_bf(lowercase_ )
_UpperCamelCase, _UpperCamelCase : List[Any] = count_inversions_recursive(lowercase_ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " ,lowercase_ )
if __name__ == "__main__":
main()
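# A worked example of the key step in _count_cross_inversions above: during
# the merge of two sorted halves, p[i] > q[j] implies every remaining element
# of p also exceeds q[j], so len(p) - i inversions are counted in one shot.
_p, _q = [2, 5, 10], [1, 3]
# Merging: 2 > 1 contributes 3 inversions (2, 5 and 10 each pair with 1),
# then 5 > 3 contributes 2 more (5 and 10 pair with 3) -- 5 in total.
assert sum(1 for x in _p for y in _q if x > y) == 5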
| 51
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase__ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, float]:
"""simple docstring"""
_UpperCamelCase : str = len([g for position, g in enumerate(lowercase_ ) if g == main_target[position]] )
return (item, float(lowercase_ ))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, str]:
"""simple docstring"""
_UpperCamelCase : Tuple = random.randint(0 ,len(lowercase_ ) - 1 )
_UpperCamelCase : Dict = parent_a[:random_slice] + parent_a[random_slice:]
_UpperCamelCase : Tuple = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : int = list(lowercase_ )
if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
_UpperCamelCase : int = random.choice(lowercase_ )
return "".join(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,) -> list[str]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = []
# Generate more children proportionally to the fitness score.
_UpperCamelCase : List[str] = int(parent_a[1] * 100 ) + 1
_UpperCamelCase : Union[str, Any] = 10 if child_n >= 10 else child_n
for _ in range(lowercase_ ):
_UpperCamelCase : Dict = population_score[random.randint(0 ,lowercase_ )][0]
_UpperCamelCase, _UpperCamelCase : Dict = crossover(parent_a[0] ,lowercase_ )
# Append new string to the population list.
pop.append(mutate(lowercase_ ,lowercase_ ) )
pop.append(mutate(lowercase_ ,lowercase_ ) )
return pop
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = True ) -> tuple[int, int, str]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
_UpperCamelCase : List[str] = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(lowercase_ )
    # Verify that the target contains no genes outside of the genes list.
_UpperCamelCase : int = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_UpperCamelCase : int = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(lowercase_ )
# Generate random starting population.
_UpperCamelCase : Union[str, Any] = []
for _ in range(lowercase_ ):
population.append("".join([random.choice(lowercase_ ) for i in range(len(lowercase_ ) )] ) )
    # Just some logs to know what the algorithm is doing.
_UpperCamelCase, _UpperCamelCase : str = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowercase_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_UpperCamelCase : int = [evaluate(lowercase_ ,lowercase_ ) for item in population]
# Check if there is a matching evolution.
_UpperCamelCase : Optional[Any] = sorted(lowercase_ ,key=lambda lowercase_ : x[1] ,reverse=lowercase_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to show that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regression of the evolution.
_UpperCamelCase : str = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowercase_ )
# Normalize population score to be between 0 and 1.
_UpperCamelCase : str = [
(item, score / len(lowercase_ )) for item, score in population_score
]
        # This is the selection step: breed new children from the scored parents.
for i in range(lowercase_ ):
population.extend(select(population_score[int(lowercase_ )] ,lowercase_ ,lowercase_ ) )
        # Check if the population has already reached the maximum size and, if so,
        # break the cycle. If this check is disabled, the algorithm will take
        # forever to compute large strings, but will also solve small strings in
        # far fewer generations.
if len(lowercase_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
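# A minimal, self-contained sketch of the fitness and crossover steps above
# (illustrative names; crossover output depends on the random cut point):
def demo_fitness(item: str, target: str) -> float:
    # fraction of positions where the candidate gene matches the target
    return sum(g == t for g, t in zip(item, target)) / len(target)

def demo_crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    cut = random.randint(0, len(parent_1) - 1)
    return (parent_1[:cut] + parent_2[cut:], parent_2[:cut] + parent_1[cut:])

assert demo_fitness("hellx", "hello") == 0.8  # four of five genes match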
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCamelCase__ = pytest.mark.integration
@require_faiss
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
        _UpperCamelCase : Optional[int] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
import faiss
_UpperCamelCase : Dataset = self._create_dummy_dataset()
_UpperCamelCase : Union[str, Any] = dset.map(
lambda __a , __a : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__a , keep_in_memory=__a )
_UpperCamelCase : Dict = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
_UpperCamelCase, _UpperCamelCase : Any = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
import faiss
_UpperCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
_UpperCamelCase, _UpperCamelCase : str = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
import faiss
_UpperCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__a ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
_UpperCamelCase, _UpperCamelCase : List[Any] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
_UpperCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(__a , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
from elasticsearch import Elasticsearch
_UpperCamelCase : Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
_UpperCamelCase : Tuple = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
_UpperCamelCase : Any = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
_UpperCamelCase : Dict = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=__a )
_UpperCamelCase, _UpperCamelCase : Dict = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
import faiss
_UpperCamelCase : List[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
_UpperCamelCase : Optional[int] = np.zeros(5 , dtype=np.floataa )
_UpperCamelCase : Dict = 1
_UpperCamelCase, _UpperCamelCase : Optional[Any] = index.search(__a )
self.assertRaises(__a , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
_UpperCamelCase : Optional[int] = np.eye(5 , dtype=np.floataa )[::-1]
_UpperCamelCase, _UpperCamelCase : Optional[Any] = index.search_batch(__a )
self.assertRaises(__a , index.search_batch , queries[0] )
_UpperCamelCase : str = [scores[0] for scores in total_scores]
_UpperCamelCase : Optional[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__a ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
import faiss
_UpperCamelCase : str = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
_UpperCamelCase : Any = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__a ):
_UpperCamelCase : List[Any] = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
import faiss
_UpperCamelCase : List[Any] = faiss.IndexFlat(5 )
_UpperCamelCase : Union[str, Any] = FaissIndex(custom_index=__a )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
import faiss
_UpperCamelCase : str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__a ) as tmp_file:
index.save(tmp_file.name )
_UpperCamelCase : str = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
_UpperCamelCase : Dict = np.zeros(5 , dtype=np.floataa )
_UpperCamelCase : Union[str, Any] = 1
_UpperCamelCase, _UpperCamelCase : Tuple = index.search(__a )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
import faiss
_UpperCamelCase : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
_UpperCamelCase : Any = "index.faiss"
_UpperCamelCase : List[Any] = F'''mock://{index_name}'''
index.save(lowercase_ ,storage_options=mockfs.storage_options )
_UpperCamelCase : int = FaissIndex.load(lowercase_ ,storage_options=mockfs.storage_options )
_UpperCamelCase : Dict = np.zeros(5 ,dtype=np.floataa )
_UpperCamelCase : str = 1
_UpperCamelCase, _UpperCamelCase : Tuple = index.search(lowercase_ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
_UpperCamelCase : List[Any] = Elasticsearch()
_UpperCamelCase : Optional[int] = {"acknowledged": True}
_UpperCamelCase : Union[str, Any] = ElasticSearchIndex(es_client=__a )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
_UpperCamelCase : Tuple = "foo"
_UpperCamelCase : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
_UpperCamelCase, _UpperCamelCase : Optional[int] = index.search(__a )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
_UpperCamelCase : List[Any] = "foo"
_UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
_UpperCamelCase, _UpperCamelCase : Any = index.search(__a , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
_UpperCamelCase : Tuple = ["foo", "bar", "foobar"]
_UpperCamelCase : List[str] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
_UpperCamelCase, _UpperCamelCase : Optional[int] = index.search_batch(__a )
_UpperCamelCase : str = [scores[0] for scores in total_scores]
_UpperCamelCase : int = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__a ) , 0 )
self.assertListEqual([1, 1, 1] , __a )
# batched queries with timeout
_UpperCamelCase : Optional[int] = ["foo", "bar", "foobar"]
_UpperCamelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
_UpperCamelCase, _UpperCamelCase : Dict = index.search_batch(__a , request_timeout=30 )
_UpperCamelCase : Dict = [scores[0] for scores in total_scores]
_UpperCamelCase : Optional[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__a ) , 0 )
self.assertListEqual([1, 1, 1] , __a )
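# The wrappers tested above sit on top of plain faiss calls. A sketch of the
# underlying usage (assumes faiss-cpu is installed; illustrative only):
def _demo_faiss_inner_product() -> None:
    import faiss
    import numpy as np

    index = faiss.IndexFlatIP(5)               # inner-product metric, as in the tests
    index.add(np.eye(5, dtype=np.float32))     # five one-hot vectors
    query = np.zeros((1, 5), dtype=np.float32)
    query[0, 1] = 1.0
    scores, indices = index.search(query, 1)   # top-1 neighbour
    assert indices[0][0] == 1                  # nearest vector is the matching one-hot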
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = ["model.decoder.embed_positions.weights"]
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
if "emb" in name:
_UpperCamelCase : List[str] = name.replace("emb" ,"model.decoder.embed_tokens" )
if "transformer" in name:
_UpperCamelCase : Optional[int] = name.replace("transformer" ,"model.decoder" )
if "cross_attention" in name:
_UpperCamelCase : Optional[int] = name.replace("cross_attention" ,"encoder_attn" )
if "linear1" in name:
_UpperCamelCase : Optional[Any] = name.replace("linear1" ,"fc1" )
if "linear2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("linear2" ,"fc2" )
if "norm1" in name:
_UpperCamelCase : Optional[Any] = name.replace("norm1" ,"self_attn_layer_norm" )
if "norm_cross" in name:
_UpperCamelCase : Dict = name.replace("norm_cross" ,"encoder_attn_layer_norm" )
if "norm2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("norm2" ,"final_layer_norm" )
if "out_norm" in name:
_UpperCamelCase : Union[str, Any] = name.replace("out_norm" ,"model.decoder.layer_norm" )
if "linears" in name:
_UpperCamelCase : List[str] = name.replace("linears" ,"lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
_UpperCamelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" ,"enc_to_dec_proj" )
return name
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple[Dict, Dict]:
"""simple docstring"""
_UpperCamelCase : str = list(state_dict.keys() )
_UpperCamelCase : Optional[Any] = {}
for key in keys:
_UpperCamelCase : Optional[int] = state_dict.pop(lowercase_ )
_UpperCamelCase : List[Any] = rename_keys(lowercase_ )
if "in_proj_weight" in key:
# split fused qkv proj
_UpperCamelCase : Tuple = val[:hidden_size, :]
_UpperCamelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
_UpperCamelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_UpperCamelCase : Optional[Any] = val
else:
_UpperCamelCase : List[str] = val
return state_dict, enc_dec_proj_state_dict
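# The qkv split above assumes the fused in_proj weight is a row-wise
# concatenation [W_q; W_k; W_v]. A small illustrative check (hypothetical
# shapes, not tied to any real checkpoint):
def _demo_qkv_split(hidden_size: int = 4) -> None:
    fused = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    w_q = fused[:hidden_size, :]
    w_k = fused[hidden_size : 2 * hidden_size, :]
    w_v = fused[-hidden_size:, :]
    assert torch.equal(torch.cat([w_q, w_k, w_v], dim=0), fused)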
def lowercase__ ( lowercase_ ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
_UpperCamelCase : List[Any] = 1_024
_UpperCamelCase : List[str] = 24
_UpperCamelCase : Any = 16
elif checkpoint == "medium":
_UpperCamelCase : Tuple = 1_536
_UpperCamelCase : Dict = 48
_UpperCamelCase : Tuple = 24
elif checkpoint == "large":
_UpperCamelCase : int = 2_048
_UpperCamelCase : Optional[int] = 48
_UpperCamelCase : Dict = 32
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
_UpperCamelCase : str = MusicgenDecoderConfig(
hidden_size=lowercase_ ,ffn_dim=hidden_size * 4 ,num_hidden_layers=lowercase_ ,num_attention_heads=lowercase_ ,)
return config
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_="cpu" ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : str = MusicGen.get_pretrained(lowercase_ ,device=lowercase_ )
_UpperCamelCase : Union[str, Any] = decoder_config_from_checkpoint(lowercase_ )
_UpperCamelCase : Optional[int] = fairseq_model.lm.state_dict()
_UpperCamelCase, _UpperCamelCase : Optional[Any] = rename_state_dict(
lowercase_ ,hidden_size=decoder_config.hidden_size )
_UpperCamelCase : Tuple = TaEncoderModel.from_pretrained("t5-base" )
_UpperCamelCase : Union[str, Any] = EncodecModel.from_pretrained("facebook/encodec_32khz" )
_UpperCamelCase : str = MusicgenForCausalLM(lowercase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_UpperCamelCase, _UpperCamelCase : str = decoder.load_state_dict(lowercase_ ,strict=lowercase_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase_ )
if len(lowercase_ ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(lowercase_ ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
_UpperCamelCase : str = MusicgenForConditionalGeneration(text_encoder=lowercase_ ,audio_encoder=lowercase_ ,decoder=lowercase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase_ )
# check we can do a forward pass
_UpperCamelCase : List[str] = torch.arange(0 ,8 ,dtype=torch.long ).reshape(2 ,-1 )
_UpperCamelCase : Dict = input_ids.reshape(2 * 4 ,-1 )
with torch.no_grad():
_UpperCamelCase : Tuple = model(input_ids=lowercase_ ,decoder_input_ids=lowercase_ ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
_UpperCamelCase : int = AutoTokenizer.from_pretrained("t5-base" )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" ,padding_side="left" )
_UpperCamelCase : Optional[int] = MusicgenProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
# set the appropriate bos/pad token ids
_UpperCamelCase : str = 2_048
_UpperCamelCase : str = 2_048
# set other default generation config params
_UpperCamelCase : Optional[Any] = int(30 * audio_encoder.config.frame_rate )
_UpperCamelCase : List[str] = True
_UpperCamelCase : int = 3.0
if pytorch_dump_folder is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(lowercase_ )
processor.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowerCamelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def lowercase__ ( ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"-m" ,"--pretrained_model_name_or_path" ,type=lowercase_ ,default=lowercase_ ,required=lowercase_ ,help="Path to pretrained model or model identifier from huggingface.co/models." ,)
parser.add_argument(
"-c" ,"--caption" ,type=lowercase_ ,default="robotic cat with wings" ,help="Text used to generate images." ,)
parser.add_argument(
"-n" ,"--images_num" ,type=lowercase_ ,default=4 ,help="How much images to generate." ,)
parser.add_argument(
"-s" ,"--seed" ,type=lowercase_ ,default=42 ,help="Seed for random process." ,)
parser.add_argument(
"-ci" ,"--cuda_id" ,type=lowercase_ ,default=0 ,help="cuda_id." ,)
_UpperCamelCase : Union[str, Any] = parser.parse_args()
return args
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
if not len(lowercase_ ) == rows * cols:
raise ValueError("The specified number of rows and columns are not correct." )
_UpperCamelCase, _UpperCamelCase : int = imgs[0].size
_UpperCamelCase : Optional[int] = Image.new("RGB" ,size=(cols * w, rows * h) )
_UpperCamelCase, _UpperCamelCase : List[str] = grid.size
for i, img in enumerate(lowercase_ ):
grid.paste(lowercase_ ,box=(i % cols * w, i // cols * h) )
return grid
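# A quick way to sanity-check the grid helper above with solid-color tiles
# (illustrative only; the real script feeds it pipeline outputs):
def _demo_image_grid() -> Image.Image:
    tiles = [Image.new("RGB", (64, 64), color) for color in ("red", "green", "blue", "white")]
    return image_grid(tiles, rows=2, cols=2)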
def lowercase__ ( lowercase_ ,lowercase_="robotic cat with wings" ,lowercase_=7.5 ,lowercase_=50 ,lowercase_=1 ,lowercase_=42 ,) -> Any:
"""simple docstring"""
_UpperCamelCase : List[Any] = torch.Generator(pipeline.device ).manual_seed(lowercase_ )
_UpperCamelCase : Optional[Any] = pipeline(
lowercase_ ,guidance_scale=lowercase_ ,num_inference_steps=lowercase_ ,generator=lowercase_ ,num_images_per_prompt=lowercase_ ,).images
_UpperCamelCase : List[Any] = int(math.sqrt(lowercase_ ) )
_UpperCamelCase : Any = image_grid(lowercase_ ,rows=_rows ,cols=num_images_per_prompt // _rows )
return grid, images
lowerCamelCase__ = parse_args()
# Load models and create wrapper for stable diffusion
lowerCamelCase__ = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
lowerCamelCase__ = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
lowerCamelCase__ = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
lowerCamelCase__ = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
lowerCamelCase__ = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
lowerCamelCase__ = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
lowerCamelCase__ = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, "unet", unet)
else:
lowerCamelCase__ = unet.to(torch.device("cuda", args.cuda_id))
lowerCamelCase__ = pipeline.to(unet.device)
lowerCamelCase__ , lowerCamelCase__ = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
lowerCamelCase__ = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
"""simple docstring"""
from datetime import datetime
import requests
from bsa import BeautifulSoup  # bsa is this corpus's rendering of bs4 (BeautifulSoup 4)
if __name__ == "__main__":
lowerCamelCase__ = input("Enter image url: ").strip()
print(f"""Downloading image from {url} ...""")
lowerCamelCase__ = BeautifulSoup(requests.get(url).content, "html.parser")
# The image URL is in the content field of the first meta tag with property og:image
lowerCamelCase__ = soup.find("meta", {"property": "og:image"})["content"]
lowerCamelCase__ = requests.get(image_url).content
lowerCamelCase__ = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
"""simple docstring"""
import math
import sys
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
if number != int(lowercase_ ):
raise ValueError("the value of input must be a natural number" )
if number < 0:
raise ValueError("the value of input must not be a negative number" )
if number == 0:
return 1
_UpperCamelCase : Optional[int] = [-1] * (number + 1)
_UpperCamelCase : List[str] = 0
for i in range(1 ,number + 1 ):
_UpperCamelCase : List[str] = sys.maxsize
_UpperCamelCase : Optional[int] = int(math.sqrt(lowercase_ ) )
for j in range(1 ,root + 1 ):
_UpperCamelCase : str = 1 + answers[i - (j**2)]
_UpperCamelCase : Union[str, Any] = min(lowercase_ ,lowercase_ )
_UpperCamelCase : Optional[int] = answer
return answers[number]
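# The loop above implements the classic minimum-perfect-squares DP:
# answers[i] = 1 + min(answers[i - j*j]) over all j with j*j <= i.
# A compact re-statement for illustration (same recurrence, simpler names):
def _min_squares(n: int) -> int:
    best = list(range(n + 1))  # best[i] <= i via the all-ones decomposition
    for i in range(1, n + 1):
        j = 1
        while j * j <= i:
            best[i] = min(best[i], 1 + best[i - j * j])
            j += 1
    return best[n]

assert _min_squares(12) == 3  # 12 = 4 + 4 + 4
assert _min_squares(13) == 2  # 13 = 4 + 9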
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import work (when the python process is running from the root of the repo)
sys.path.append(".")
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Any = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
F'''{test_file} instead.''' )
_UpperCamelCase : str = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
_UpperCamelCase : Dict = components[:-1] + [test_fn.replace(".py" ,"" )]
_UpperCamelCase : List[str] = ".".join(lowercase_ )
return test_module_path
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = get_module_path(lowercase_ )
_UpperCamelCase : str = importlib.import_module(lowercase_ )
return test_module
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : List[Any] = get_test_module(lowercase_ )
for attr in dir(lowercase_ ):
if attr.endswith("ModelTester" ):
tester_classes.append(getattr(lowercase_ ,lowercase_ ) )
    # sort by class name
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : Any = get_test_module(lowercase_ )
for attr in dir(lowercase_ ):
_UpperCamelCase : int = getattr(lowercase_ ,lowercase_ )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test modules. Let's exclude them by
        # checking that `all_model_classes` is not empty (which also excludes other special classes).
_UpperCamelCase : Optional[Any] = getattr(lowercase_ ,"all_model_classes" ,[] )
if len(lowercase_ ) > 0:
test_classes.append(lowercase_ )
    # sort by class name
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Dict = get_test_classes(lowercase_ )
_UpperCamelCase : int = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
    # sort by class name
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = test_class()
if hasattr(lowercase_ ,"setUp" ):
test.setUp()
_UpperCamelCase : Tuple = None
if hasattr(lowercase_ ,"model_tester" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
_UpperCamelCase : Tuple = test.model_tester.__class__
return model_tester
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = get_test_classes(lowercase_ )
_UpperCamelCase : Dict = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(lowercase_ )
    # sort by class name
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Any = get_test_classes_for_model(lowercase_ ,lowercase_ )
_UpperCamelCase : List[Any] = []
for test_class in test_classes:
_UpperCamelCase : List[Any] = get_model_tester_from_test_class(lowercase_ )
if tester_class is not None:
tester_classes.append(lowercase_ )
    # sort by class name
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Any = get_test_classes(lowercase_ )
_UpperCamelCase : Tuple = {test_class: get_model_tester_from_test_class(lowercase_ ) for test_class in test_classes}
return test_tester_mapping
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_model_classes(lowercase_ )
_UpperCamelCase : Optional[int] = {
model_class: get_test_classes_for_model(lowercase_ ,lowercase_ ) for model_class in model_classes
}
return model_test_mapping
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = get_model_classes(lowercase_ )
_UpperCamelCase : Tuple = {
model_class: get_tester_classes_for_model(lowercase_ ,lowercase_ ) for model_class in model_classes
}
return model_to_tester_mapping
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
if isinstance(lowercase_ ,lowercase_ ):
return o
elif isinstance(lowercase_ ,lowercase_ ):
return o.__name__
elif isinstance(lowercase_ ,(list, tuple) ):
return [to_json(lowercase_ ) for x in o]
elif isinstance(lowercase_ ,lowercase_ ):
return {to_json(lowercase_ ): to_json(lowercase_ ) for k, v in o.items()}
else:
return o
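# A quick offline check of the path-to-module conversion performed above
# (pure stdlib; uses "/" for brevity where the real code uses os.path.sep):
_parts = "tests/models/bert/test_modeling_bert.py".split("/")
assert ".".join(_parts[:-1] + [_parts[-1].replace(".py", "")]) == "tests.models.bert.test_modeling_bert"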
"""simple docstring"""
from manim import *
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
_UpperCamelCase : int = Rectangle(height=0.5 , width=0.5 )
_UpperCamelCase : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_UpperCamelCase : Optional[int] = [mem.copy() for i in range(6 )]
_UpperCamelCase : Optional[int] = [mem.copy() for i in range(6 )]
_UpperCamelCase : Optional[Any] = VGroup(*__a ).arrange(__a , buff=0 )
_UpperCamelCase : Union[str, Any] = VGroup(*__a ).arrange(__a , buff=0 )
_UpperCamelCase : int = VGroup(__a , __a ).arrange(__a , buff=0 )
_UpperCamelCase : Any = Text("CPU" , font_size=24 )
_UpperCamelCase : List[str] = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
_UpperCamelCase : Optional[int] = [mem.copy() for i in range(4 )]
_UpperCamelCase : Optional[int] = VGroup(*__a ).arrange(__a , buff=0 )
_UpperCamelCase : Dict = Text("GPU" , font_size=24 )
_UpperCamelCase : Optional[Any] = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
_UpperCamelCase : Tuple = [mem.copy() for i in range(6 )]
_UpperCamelCase : Dict = VGroup(*__a ).arrange(__a , buff=0 )
_UpperCamelCase : str = Text("Model" , font_size=24 )
_UpperCamelCase : Any = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
_UpperCamelCase : Any = []
for i, rect in enumerate(__a ):
rect.set_stroke(__a )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_UpperCamelCase : Optional[Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__a , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__a , buff=0.0 )
self.add(__a )
cpu_targs.append(__a )
_UpperCamelCase : List[Any] = [mem.copy() for i in range(6 )]
_UpperCamelCase : str = VGroup(*__a ).arrange(__a , buff=0 )
_UpperCamelCase : Any = Text("Loaded Checkpoint" , font_size=24 )
_UpperCamelCase : List[str] = Group(__a , __a ).arrange(__a , aligned_edge=__a , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_UpperCamelCase : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCamelCase : Optional[Any] = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__a , __a )
_UpperCamelCase : Dict = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_UpperCamelCase : Optional[int] = MarkupText(
F'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a ) , Write(__a ) )
self.play(Write(__a , run_time=1 ) , Create(__a , run_time=1 ) )
_UpperCamelCase : Optional[int] = []
_UpperCamelCase : List[str] = []
for i, rect in enumerate(__a ):
_UpperCamelCase : int = fill.copy().set_fill(__a , opacity=0.7 )
target.move_to(__a )
first_animations.append(GrowFromCenter(__a , run_time=1 ) )
_UpperCamelCase : Tuple = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
self.play(*__a )
self.play(*__a )
self.wait()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
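# _LazyModule defers the heavy imports above until attribute access. A
# stripped-down sketch of the same idea (illustrative, not the actual
# transformers implementation):
import importlib
import types

class _DemoLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._demo_import_structure = import_structure

    def __getattr__(self, attr: str):
        # Import the owning submodule only when one of its names is requested.
        for submodule, names in self._demo_import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)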
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
lowerCamelCase__ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
lowerCamelCase__ = 12_8022
lowerCamelCase__ = 12_8028
@require_sentencepiece
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = MaMaaaTokenizer
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
SCREAMING_SNAKE_CASE__ :Tuple = False
SCREAMING_SNAKE_CASE__ :str = True
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
super().setUp()
_UpperCamelCase : List[Any] = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_UpperCamelCase : Dict = dict(zip(__a , range(len(__a ) ) ) )
_UpperCamelCase : List[str] = Path(self.tmpdirname )
save_json(__a , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__a , save_dir / VOCAB_FILES_NAMES["spm_file"] )
_UpperCamelCase : Tuple = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Dict , **__a : Dict ) -> Optional[int]:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Union[str, Any] ) -> int:
return (
"This is a test",
"This is a test",
)
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
_UpperCamelCase : List[str] = "</s>"
_UpperCamelCase : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Tuple = self.get_tokenizer()
_UpperCamelCase : str = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(__a ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
pass
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
_UpperCamelCase : Optional[int] = self.get_tokenizer()
_UpperCamelCase : Optional[int] = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [2, 3, 4, 5, 6] , )
_UpperCamelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
_UpperCamelCase : Any = tokenizer.convert_tokens_to_string(__a )
self.assertEqual(__a , "This is a test" )
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
# fmt: off
_UpperCamelCase : List[Any] = {"input_ids": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = "facebook/m2m100_418M"
SCREAMING_SNAKE_CASE__ :Union[str, Any] = [
"In my opinion, there are two levels of response from the French government.",
"NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
]
SCREAMING_SNAKE_CASE__ :Optional[int] = [
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
]
# fmt: off
SCREAMING_SNAKE_CASE__ :int = [EN_CODE, 593, 1_949, 115_781, 4, 71_586, 4_234, 60_633, 126_233, 432, 123_808, 15_592, 1_197, 117_132, 120_618, 5, 2]
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : List[Any] ) -> str:
_UpperCamelCase : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
_UpperCamelCase : Union[str, Any] = 1
return cls
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 12_8006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 12_8022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 12_8076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 12_8063 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = self.tokenizer.get_vocab()
self.assertEqual(len(__a ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , __a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
_UpperCamelCase : Union[str, Any] = "en"
_UpperCamelCase : Union[str, Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
self.assertIn(__a , self.tokenizer.all_special_ids )
# fmt: off
_UpperCamelCase : Union[str, Any] = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
# fmt: on
_UpperCamelCase : Dict = self.tokenizer.decode(__a , skip_special_tokens=__a )
_UpperCamelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
_UpperCamelCase : Tuple = tempfile.mkdtemp()
_UpperCamelCase : List[str] = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(__a )
_UpperCamelCase : int = MaMaaaTokenizer.from_pretrained(__a )
self.assertDictEqual(new_tok.lang_token_to_id , __a )
@require_torch
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
_UpperCamelCase : Optional[Any] = "en"
_UpperCamelCase : int = "fr"
_UpperCamelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors="pt" )
_UpperCamelCase : Optional[int] = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
_UpperCamelCase : Any = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Any = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
_UpperCamelCase : Optional[int] = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
_UpperCamelCase : Any = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
_UpperCamelCase : List[str] = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase : Union[str, Any] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(__a ) , {
# en_XX, A, test, EOS
"input_ids": [[12_8022, 58, 4183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 12_8006,
} , )
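# shift_tokens_right (used in the batch test above) builds decoder inputs from
# labels by shifting them one position right. A sketch of the idea, assuming a
# torch LongTensor of label ids (illustrative, not the exact transformers code):
def _demo_shift_right(labels, pad_id: int, start_id: int):
    # e.g. [[5364, 82, 5, 2]] with start_id=2 -> [[2, 5364, 82, 5]]
    shifted = labels.new_full(labels.shape, pad_id)
    shifted[:, 1:] = labels[:, :-1]
    shifted[:, 0] = start_id
    return shifted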
"""simple docstring"""
lowerCamelCase__ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> Dict:
"""simple docstring"""
if not head:
return True
    # split the list into two parts
_UpperCamelCase, _UpperCamelCase : Tuple = head.next, head
while fast and fast.next:
_UpperCamelCase : List[str] = fast.next.next
_UpperCamelCase : Tuple = slow.next
_UpperCamelCase : Tuple = slow.next
    _UpperCamelCase : Any = None  # cut the first half here; the comparison below still works if you skip this
# reverse the second part
_UpperCamelCase : List[Any] = None
while second:
_UpperCamelCase : Optional[int] = second.next
_UpperCamelCase : List[str] = node
_UpperCamelCase : List[Any] = second
_UpperCamelCase : List[Any] = nxt
    # compare the two parts
    # the second half has the same number of nodes, or one fewer
while node:
if node.val != head.val:
return False
_UpperCamelCase : str = node.next
_UpperCamelCase : Tuple = head.next
return True
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
_UpperCamelCase : Dict = head
while fast and fast.next:
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = fast.next.next, slow.next
# 2. Push the second half into the stack
_UpperCamelCase : str = [slow.val]
while slow.next:
_UpperCamelCase : Dict = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
_UpperCamelCase : Any = cur.next
return True
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
if not head or not head.next:
return True
_UpperCamelCase : Union[str, Any] = {}
_UpperCamelCase : List[Any] = 0
while head:
if head.val in d:
d[head.val].append(lowercase_ )
else:
_UpperCamelCase : List[str] = [pos]
_UpperCamelCase : Optional[int] = head.next
pos += 1
_UpperCamelCase : Tuple = pos - 1
_UpperCamelCase : List[str] = 0
for v in d.values():
if len(lowercase_ ) % 2 != 0:
middle += 1
else:
_UpperCamelCase : Dict = 0
for i in range(0 ,len(lowercase_ ) ):
if v[i] + v[len(lowercase_ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
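# All three checks above assume a singly linked node exposing .val and .next.
# A minimal node class and builder for experimenting (illustrative names):
class _Node:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt

def _from_list(values):
    head = None
    for v in reversed(values):
        head = _Node(v, head)
    return head

# e.g. _from_list([1, 2, 2, 1]) is a palindrome list; _from_list([1, 2]) is not.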
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
_UpperCamelCase : Tuple = tempfile.mkdtemp()
_UpperCamelCase : str = 5
# Realm tok
_UpperCamelCase : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
_UpperCamelCase : Optional[Any] = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase : Optional[Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
_UpperCamelCase : Any = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
_UpperCamelCase : int = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
] , dtype=__a , )
return block_records
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
_UpperCamelCase : List[str] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
_UpperCamelCase : Tuple = self.get_config()
_UpperCamelCase : int = self.get_dummy_retriever()
_UpperCamelCase : Tuple = retriever.tokenizer
_UpperCamelCase : List[str] = np.array([0, 3] , dtype="long" )
_UpperCamelCase : Union[str, Any] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : List[str] = tokenizer(
["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : str = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase : Any = self.get_config()
_UpperCamelCase : Dict = self.get_dummy_retriever()
_UpperCamelCase : Dict = retriever.tokenizer
_UpperCamelCase : List[Any] = np.array([0, 3, 5] , dtype="long" )
_UpperCamelCase : Optional[int] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : str = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : Union[str, Any] = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual([False, True, True] , __a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
_UpperCamelCase : int = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
_UpperCamelCase : List[Any] = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
_UpperCamelCase : int = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str
SCREAMING_SNAKE_CASE__ :int
def lowercase__ ( lowercase_ ) -> list[str]:
"""simple docstring"""
if not isinstance(lowercase_ ,lowercase_ ):
raise TypeError("The parameter s type must be str." )
return [s[i:] + s[:i] for i in range(len(lowercase_ ) )]
def lowercase__ ( lowercase_ ) -> BWTTransformDict:
"""simple docstring"""
if not isinstance(lowercase_ ,lowercase_ ):
raise TypeError("The parameter s type must be str." )
if not s:
raise ValueError("The parameter s must not be empty." )
_UpperCamelCase : str = all_rotations(lowercase_ )
    rotations.sort()  # sort the list of rotations in alphabetical order
# make a string composed of the last char of each rotation
_UpperCamelCase : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(lowercase_ ),
}
return response
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
if not isinstance(lowercase_ ,lowercase_ ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
_UpperCamelCase : str = int(lowercase_ )
except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or"
            " convertible to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(lowercase_ ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
_UpperCamelCase : Dict = [""] * len(lowercase_ )
for _ in range(len(lowercase_ ) ):
for i in range(len(lowercase_ ) ):
_UpperCamelCase : str = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
lowerCamelCase__ = "Provide a string that I will generate its BWT transform: "
lowerCamelCase__ = input(entry_msg).strip()
lowerCamelCase__ = bwt_transform(s)
print(
f"""Burrows Wheeler transform for string '{s}' results """
f"""in '{result["bwt_string"]}'"""
)
lowerCamelCase__ = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
f"""Reversing Burrows Wheeler transform for entry '{result["bwt_string"]}' """
f"""we get original string '{original_string}'"""
)
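For readability, here is a minimal round-trip sketch of the same Burrows-Wheeler transform with conventional names; the identifiers are illustrative and assume the mangled helpers above correspond to all_rotations, bwt_transform and reverse_bwt.

from __future__ import annotations

# illustrative re-statement with readable names (not the dataset's identifiers)
def all_rotations(s: str) -> list[str]:
    # every cyclic rotation of s
    return [s[i:] + s[:i] for i in range(len(s))]

def bwt_transform(s: str) -> dict:
    rotations = sorted(all_rotations(s))
    return {
        "bwt_string": "".join(word[-1] for word in rotations),
        "idx_original_string": rotations.index(s),
    }

def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    # repeatedly prepend the BWT column and re-sort to rebuild all rotations
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]

result = bwt_transform("^BANANA")
assert result["bwt_string"] == "BNN^AAA"
assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "^BANANA"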
| 51
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = LEDConfig
SCREAMING_SNAKE_CASE__ :str = {}
SCREAMING_SNAKE_CASE__ :List[str] = "gelu"
def __init__( self : List[Any] , __a : Union[str, Any] , __a : List[Any]=13 , __a : int=7 , __a : str=True , __a : Any=False , __a : str=99 , __a : str=32 , __a : Union[str, Any]=2 , __a : Optional[Any]=4 , __a : List[Any]=37 , __a : List[Any]=0.1 , __a : Tuple=0.1 , __a : Dict=20 , __a : str=2 , __a : Dict=1 , __a : Any=0 , __a : List[Any]=4 , ) -> List[Any]:
_UpperCamelCase : Optional[Any] = parent
_UpperCamelCase : List[str] = batch_size
_UpperCamelCase : str = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : int = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : str = max_position_embeddings
_UpperCamelCase : int = eos_token_id
_UpperCamelCase : Dict = pad_token_id
_UpperCamelCase : Optional[Any] = bos_token_id
_UpperCamelCase : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` tokens, plus one before and one after
_UpperCamelCase : List[str] = self.attention_window + 2
# because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_UpperCamelCase : int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
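# worked example (illustrative): with the defaults seq_length=7 and
# attention_window=4, 7 % 4 == 3, so (4 - 3) % 4 == 1 padding token is
# added and encoder_seq_length == 8, the next multiple of the window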
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCamelCase : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCamelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_UpperCamelCase : Dict = prepare_led_inputs_dict(__a , __a , __a )
_UpperCamelCase : Union[str, Any] = tf.concat(
[tf.zeros_like(__a )[:, :-1], tf.ones_like(__a )[:, -1:]] , axis=-1 , )
_UpperCamelCase : Union[str, Any] = global_attention_mask
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : int ) -> Tuple:
_UpperCamelCase : Tuple = TFLEDModel(config=__a ).get_decoder()
_UpperCamelCase : Tuple = inputs_dict["input_ids"]
_UpperCamelCase : int = input_ids[:1, :]
_UpperCamelCase : List[str] = inputs_dict["attention_mask"][:1, :]
_UpperCamelCase : List[Any] = 1
# first forward pass
_UpperCamelCase : Any = model(__a , attention_mask=__a , use_cache=__a )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = outputs.to_tuple()
# create hypothetical next tokens and extend next_input_ids with them
_UpperCamelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCamelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
_UpperCamelCase : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCamelCase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCamelCase : Tuple = model(__a , attention_mask=__a )[0]
_UpperCamelCase : int = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCamelCase : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=None ,lowercase_=None ,) -> Dict:
"""simple docstring"""
if attention_mask is None:
_UpperCamelCase : str = tf.cast(tf.math.not_equal(lowercase_ ,config.pad_token_id ) ,tf.inta )
if decoder_attention_mask is None:
_UpperCamelCase : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ),
] ,axis=-1 ,)
if head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Tuple = True
SCREAMING_SNAKE_CASE__ :str = False
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
_UpperCamelCase : int = TFLEDModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase, _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[int] = tf.zeros_like(inputs_dict["attention_mask"] )
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : str = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_UpperCamelCase : Dict = True
_UpperCamelCase : str = self.model_tester.seq_length
_UpperCamelCase : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__a : Optional[int] ):
_UpperCamelCase : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__a : Optional[Any] ):
_UpperCamelCase : Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions]
_UpperCamelCase : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = True
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Any = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
_UpperCamelCase : Optional[Any] = model_class(__a )
_UpperCamelCase : List[Any] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCamelCase : int = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : str = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
_UpperCamelCase : Any = True
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
# TODO: head masking is not yet implemented
pass
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
return tf.constant(lowercase_ ,dtype=tf.intaa )
lowerCamelCase__ = 1E-4
@slow
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Any = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_UpperCamelCase : int = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Tuple = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Optional[int] = model(**__a )[0]
_UpperCamelCase : Optional[int] = (1, 1024, 768)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Tuple = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_UpperCamelCase : Optional[int] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : List[str] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Union[str, Any] = model(**__a )[0]
_UpperCamelCase : int = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Optional[int] = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 , rtol=1e-3 )
| 51
| 1
|
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 51
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = RoCBertTokenizer
SCREAMING_SNAKE_CASE__ :Dict = None
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = True
SCREAMING_SNAKE_CASE__ :Union[str, Any] = filter_non_english
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
super().setUp()
_UpperCamelCase : Any = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
_UpperCamelCase : List[str] = {}
_UpperCamelCase : Tuple = {}
for i, value in enumerate(__a ):
_UpperCamelCase : List[str] = i
_UpperCamelCase : Optional[Any] = i
_UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
_UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(__a , __a , ensure_ascii=__a )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(__a , __a , ensure_ascii=__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : int = tokenizer.tokenize("你好[SEP]你是谁" )
self.assertListEqual(__a , ["你", "好", "[SEP]", "你", "是", "谁"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__a ) , [5, 6, 2, 5, 7, 8] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
_UpperCamelCase : Dict = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
_UpperCamelCase : List[Any] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
_UpperCamelCase : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Dict = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : int = RoCBertBasicTokenizer(do_lower_case=__a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_UpperCamelCase : Any = {}
for i, token in enumerate(__a ):
_UpperCamelCase : str = i
_UpperCamelCase : Optional[int] = RoCBertWordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
_UpperCamelCase : Tuple = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Union[str, Any] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_UpperCamelCase : Optional[Any] = tokenizer_r.encode_plus(
__a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , )
_UpperCamelCase : List[Any] = tokenizer_r.do_lower_case if hasattr(__a , "do_lower_case" ) else False
_UpperCamelCase : Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = ["的", "人", "有"]
_UpperCamelCase : int = "".join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : int = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Any = False
_UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Any = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCamelCase : Any = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : Optional[int] = tokenizer.encode("你好" , add_special_tokens=__a )
_UpperCamelCase : Dict = tokenizer.encode("你是谁" , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__a )
_UpperCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Optional[Any] = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : int = "你好,你是谁"
_UpperCamelCase : Any = tokenizer.tokenize(__a )
_UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__a )
_UpperCamelCase : List[str] = tokenizer.convert_tokens_to_shape_ids(__a )
_UpperCamelCase : Any = tokenizer.convert_tokens_to_pronunciation_ids(__a )
_UpperCamelCase : Optional[int] = tokenizer.prepare_for_model(
__a , __a , __a , add_special_tokens=__a )
_UpperCamelCase : Tuple = tokenizer.encode_plus(__a , add_special_tokens=__a )
self.assertEqual(__a , __a )
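As a hedged illustration of why the three id sequences coincide in the tests above: setUp builds the shape and pronunciation vocabularies as copies of the token vocabulary, so each token maps to the same index in all three spaces. A minimal sketch; the names are illustrative, not the tokenizer's API.

# illustrative: RoCBert indexes every token in three parallel vocabularies
vocab = {"[UNK]": 0, "[CLS]": 1, "[SEP]": 2, "你": 5, "好": 6}
shape_vocab = dict(vocab)  # glyph-shape ids, identical to token ids here
pron_vocab = dict(vocab)   # pronunciation ids, identical to token ids here

tokens = ["你", "好"]
token_ids = [vocab.get(t, vocab["[UNK]"]) for t in tokens]
shape_ids = [shape_vocab.get(t, shape_vocab["[UNK]"]) for t in tokens]
pron_ids = [pron_vocab.get(t, pron_vocab["[UNK]"]) for t in tokens]
assert token_ids == shape_ids == pron_ids == [5, 6]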
| 51
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
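The block above registers submodules in an import structure and hands it to _LazyModule, so heavy backends are imported only on first attribute access. Below is a minimal sketch of that idea using PEP 562's module-level __getattr__; it illustrates the pattern only and is not the transformers implementation.

import importlib

# illustration of the lazy-import pattern, not the transformers _LazyModule
_import_structure = {"json": ["dumps"], "math": ["sqrt"]}

def __getattr__(name):  # called only when `name` is not already defined
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")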
| 51
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = "yolos"
def __init__( self : Dict , __a : Optional[Any]=768 , __a : List[Any]=12 , __a : Any=12 , __a : List[Any]=3072 , __a : Optional[int]="gelu" , __a : Dict=0.0 , __a : Optional[Any]=0.0 , __a : Any=0.02 , __a : Optional[int]=1e-1_2 , __a : List[Any]=[512, 864] , __a : List[str]=16 , __a : str=3 , __a : Optional[Any]=True , __a : Optional[Any]=100 , __a : List[str]=True , __a : Any=False , __a : List[str]=1 , __a : str=5 , __a : Optional[Any]=2 , __a : Tuple=5 , __a : Any=2 , __a : Union[str, Any]=0.1 , **__a : List[str] , ) -> List[str]:
super().__init__(**__a )
_UpperCamelCase : Dict = hidden_size
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : Tuple = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : Any = qkv_bias
_UpperCamelCase : str = num_detection_tokens
_UpperCamelCase : str = use_mid_position_embeddings
_UpperCamelCase : List[str] = auxiliary_loss
# Hungarian matcher
_UpperCamelCase : List[Any] = class_cost
_UpperCamelCase : int = bbox_cost
_UpperCamelCase : Optional[int] = giou_cost
# Loss coefficients
_UpperCamelCase : List[Any] = bbox_loss_coefficient
_UpperCamelCase : str = giou_loss_coefficient
_UpperCamelCase : Dict = eos_coefficient
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> float:
return 1e-4
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return 12
| 51
| 1
|
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
lowerCamelCase__ = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
lowerCamelCase__ = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
lowerCamelCase__ = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
return float((preds == labels).mean() )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_="binary" ) -> str:
"""simple docstring"""
_UpperCamelCase : Tuple = simple_accuracy(lowercase_ ,lowercase_ )
_UpperCamelCase : Union[str, Any] = float(fa_score(y_true=lowercase_ ,y_pred=lowercase_ ,average=lowercase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = {}
for id_pred, label in zip(lowercase_ ,lowercase_ ):
_UpperCamelCase : int = F'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}'''
_UpperCamelCase : List[Any] = id_pred["prediction"]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
_UpperCamelCase : int = [(pred, label)]
_UpperCamelCase, _UpperCamelCase : Optional[Any] = [], []
for question, preds_labels in question_map.items():
_UpperCamelCase, _UpperCamelCase : str = zip(*lowercase_ )
_UpperCamelCase : Optional[Any] = fa_score(y_true=lowercase_ ,y_pred=lowercase_ ,average="macro" )
fas.append(lowercase_ )
_UpperCamelCase : List[Any] = int(sum(pred == label for pred, label in preds_labels ) == len(lowercase_ ) )
ems.append(lowercase_ )
_UpperCamelCase : Optional[Any] = float(sum(lowercase_ ) / len(lowercase_ ) )
_UpperCamelCase : int = sum(lowercase_ ) / len(lowercase_ )
_UpperCamelCase : Union[str, Any] = float(fa_score(y_true=lowercase_ ,y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Dict , __a : Dict ) -> List[Any]:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(__a , __a )}
elif self.config_name == "cb":
return acc_and_fa(__a , __a , fa_avg="macro" )
elif self.config_name == "record":
_UpperCamelCase : Optional[Any] = [
{
"qas": [
{"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
for ref in references
]
}
]
_UpperCamelCase : Optional[int] = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
return evaluate_record(__a , __a )[0]
elif self.config_name == "multirc":
return evaluate_multirc(__a , __a )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(__a , __a )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 51
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCamelCase__ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCamelCase__ = [ord(letter) for letter in string.ascii_lowercase]
lowerCamelCase__ = {ord(char) for char in VALID_CHARS}
lowerCamelCase__ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowercase__ ( lowercase_ ,lowercase_ ) -> str | None:
"""simple docstring"""
_UpperCamelCase : str = ""
_UpperCamelCase : int
_UpperCamelCase : int
_UpperCamelCase : int
for keychar, cipherchar in zip(cycle(lowercase_ ) ,lowercase_ ):
_UpperCamelCase : Dict = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowercase_ )
return decoded
def lowercase__ ( lowercase_ ) -> list[str]:
"""simple docstring"""
_UpperCamelCase : list[str] = []
for key in product(lowercase_ ,repeat=3 ):
_UpperCamelCase : int = try_key(lowercase_ ,lowercase_ )
if encoded is not None:
possibles.append(lowercase_ )
return possibles
def lowercase__ ( lowercase_ ,lowercase_ ) -> list[str]:
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def lowercase__ ( lowercase_ = "p059_cipher.txt" ) -> int:
"""simple docstring"""
_UpperCamelCase : list[int]
_UpperCamelCase : list[str]
_UpperCamelCase : str
_UpperCamelCase : str
_UpperCamelCase : str = Path(lowercase_ ).parent.joinpath(lowercase_ ).read_text(encoding="utf-8" )
_UpperCamelCase : Optional[Any] = [int(lowercase_ ) for number in data.strip().split("," )]
_UpperCamelCase : List[str] = filter_valid_chars(lowercase_ )
for common_word in COMMON_WORDS:
_UpperCamelCase : Union[str, Any] = filter_common_word(lowercase_ ,lowercase_ )
if len(lowercase_ ) == 1:
break
_UpperCamelCase : Union[str, Any] = possibles[0]
return sum(ord(lowercase_ ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {
"configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
|
"""simple docstring"""
def lowercase__ ( lowercase_ ,lowercase_ ) -> None:
"""simple docstring"""
_UpperCamelCase : List[Any] = len(lowercase_ )
print("The following activities are selected:" )
# The first activity is always selected
_UpperCamelCase : List[Any] = 0
print(lowercase_ ,end="," )
# Consider rest of the activities
for j in range(lowercase_ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(lowercase_ ,end="," )
_UpperCamelCase : Optional[Any] = j
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = [1, 3, 0, 5, 8, 5]
lowerCamelCase__ = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
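A readably-named restatement of the same greedy rule, returning indices instead of printing; this is illustrative and assumes, as the demo above does, that activities are pre-sorted by finish time.

from __future__ import annotations

# illustrative names; same greedy invariant as print_max_activities above
def select_activities(start: list[int], finish: list[int]) -> list[int]:
    selected = [0]  # the first activity is always taken
    last_finish = finish[0]
    for j in range(1, len(start)):
        if start[j] >= last_finish:  # starts after the last selected one ends
            selected.append(j)
            last_finish = finish[j]
    return selected

assert select_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]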
| 51
| 1
|
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
lowerCamelCase__ = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 51
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :torch.FloatTensor
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Dict=3 , __a : Any=3 , __a : Union[str, Any]=("DownEncoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Tuple=32 , __a : int="silu" , __a : str=True , ) -> Dict:
super().__init__()
_UpperCamelCase : List[str] = layers_per_block
_UpperCamelCase : Dict = torch.nn.Convad(
__a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : int = None
_UpperCamelCase : Any = nn.ModuleList([] )
# down
_UpperCamelCase : List[str] = block_out_channels[0]
for i, down_block_type in enumerate(__a ):
_UpperCamelCase : Tuple = output_channel
_UpperCamelCase : int = block_out_channels[i]
_UpperCamelCase : int = i == len(__a ) - 1
_UpperCamelCase : Dict = get_down_block(
__a , num_layers=self.layers_per_block , in_channels=__a , out_channels=__a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , )
self.down_blocks.append(__a )
# mid
_UpperCamelCase : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# out
_UpperCamelCase : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : Any = nn.SiLU()
_UpperCamelCase : Union[str, Any] = 2 * out_channels if double_z else out_channels
_UpperCamelCase : Tuple = nn.Convad(block_out_channels[-1] , __a , 3 , padding=1 )
_UpperCamelCase : Optional[int] = False
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Dict ) -> List[str]:
_UpperCamelCase : int = x
_UpperCamelCase : Optional[int] = self.conv_in(__a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Tuple ):
def custom_forward(*__a : Any ):
return module(*__a )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , use_reentrant=__a )
# middle
_UpperCamelCase : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , use_reentrant=__a )
else:
for down_block in self.down_blocks:
_UpperCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a )
# middle
_UpperCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __a )
else:
# down
for down_block in self.down_blocks:
_UpperCamelCase : int = down_block(__a )
# middle
_UpperCamelCase : int = self.mid_block(__a )
# post-process
_UpperCamelCase : Any = self.conv_norm_out(__a )
_UpperCamelCase : Any = self.conv_act(__a )
_UpperCamelCase : Optional[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : int=3 , __a : Any=3 , __a : str=("UpDecoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Optional[int]=32 , __a : Tuple="silu" , __a : Union[str, Any]="group" , ) -> str:
super().__init__()
_UpperCamelCase : List[Any] = layers_per_block
_UpperCamelCase : Tuple = nn.Convad(
__a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = nn.ModuleList([] )
_UpperCamelCase : List[Any] = in_channels if norm_type == "spatial" else None
# mid
_UpperCamelCase : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# up
_UpperCamelCase : List[str] = list(reversed(__a ) )
_UpperCamelCase : int = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__a ):
_UpperCamelCase : int = output_channel
_UpperCamelCase : Union[str, Any] = reversed_block_out_channels[i]
_UpperCamelCase : Optional[Any] = i == len(__a ) - 1
_UpperCamelCase : Union[str, Any] = get_up_block(
__a , num_layers=self.layers_per_block + 1 , in_channels=__a , out_channels=__a , prev_output_channel=__a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , resnet_time_scale_shift=__a , )
self.up_blocks.append(__a )
_UpperCamelCase : Optional[Any] = output_channel
# out
if norm_type == "spatial":
_UpperCamelCase : Optional[int] = SpatialNorm(block_out_channels[0] , __a )
else:
_UpperCamelCase : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : str = nn.SiLU()
_UpperCamelCase : str = nn.Convad(block_out_channels[0] , __a , 3 , padding=1 )
_UpperCamelCase : Dict = False
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[Any] , __a : Union[str, Any]=None ) -> Tuple:
_UpperCamelCase : List[str] = z
_UpperCamelCase : Dict = self.conv_in(__a )
_UpperCamelCase : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Any ):
def custom_forward(*__a : Tuple ):
return module(*__a )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a , use_reentrant=__a )
_UpperCamelCase : Optional[int] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , __a , use_reentrant=__a )
else:
# middle
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a )
_UpperCamelCase : Union[str, Any] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a , __a )
else:
# middle
_UpperCamelCase : str = self.mid_block(__a , __a )
_UpperCamelCase : int = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : Any = up_block(__a , __a )
# post-process
if latent_embeds is None:
_UpperCamelCase : List[str] = self.conv_norm_out(__a )
else:
_UpperCamelCase : Optional[int] = self.conv_norm_out(__a , __a )
_UpperCamelCase : Tuple = self.conv_act(__a )
_UpperCamelCase : List[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple , __a : List[str] , __a : List[str] , __a : str=None , __a : Optional[int]="random" , __a : Any=False , __a : Optional[Any]=True ) -> List[Any]:
super().__init__()
_UpperCamelCase : Tuple = n_e
_UpperCamelCase : Tuple = vq_embed_dim
_UpperCamelCase : Union[str, Any] = beta
_UpperCamelCase : str = legacy
_UpperCamelCase : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_UpperCamelCase : Any = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
_UpperCamelCase : Dict = self.used.shape[0]
_UpperCamelCase : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_UpperCamelCase : Optional[int] = self.re_embed
_UpperCamelCase : Any = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
_UpperCamelCase : Union[str, Any] = n_e
_UpperCamelCase : List[str] = sane_index_shape
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : str = inds.shape
assert len(__a ) > 1
_UpperCamelCase : Union[str, Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[Any] = self.used.to(__a )
_UpperCamelCase : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
_UpperCamelCase : Optional[Any] = match.argmax(-1 )
_UpperCamelCase : Any = match.sum(2 ) < 1
if self.unknown_index == "random":
_UpperCamelCase : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_UpperCamelCase : Dict = self.unknown_index
return new.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[int] ) -> Optional[int]:
_UpperCamelCase : int = inds.shape
assert len(__a ) > 1
_UpperCamelCase : List[Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[int] = self.used.to(__a )
if self.re_embed > self.used.shape[0]: # extra token
_UpperCamelCase : int = 0 # simply set to zero
_UpperCamelCase : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __a )
return back.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : str ) -> Optional[int]:
# reshape z -> (batch, height, width, channel) and flatten
_UpperCamelCase : List[str] = z.permute(0 , 2 , 3 , 1 ).contiguous()
_UpperCamelCase : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_UpperCamelCase : Optional[int] = torch.argmin(torch.cdist(z_flattened , self.embedding.weight ) , dim=1 )
_UpperCamelCase : int = self.embedding(min_encoding_indices ).view(z.shape )
_UpperCamelCase : str = None
_UpperCamelCase : Any = None
# compute loss for embedding
if not self.legacy:
_UpperCamelCase : List[str] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_UpperCamelCase : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_UpperCamelCase : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
_UpperCamelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_UpperCamelCase : Tuple = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_UpperCamelCase : Dict = self.remap_to_used(min_encoding_indices )
_UpperCamelCase : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_UpperCamelCase : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[str] , __a : str ) -> Any:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_UpperCamelCase : str = indices.reshape(shape[0] , -1 ) # add batch axis
_UpperCamelCase : str = self.unmap_to_all(__a )
_UpperCamelCase : int = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_UpperCamelCase : Optional[int] = self.embedding(__a )
if shape is not None:
_UpperCamelCase : Tuple = z_q.view(__a )
# reshape back to match original input shape
_UpperCamelCase : Tuple = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
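# A minimal sketch of the quantization step implemented above: nearest-codebook
# lookup plus the straight-through gradient trick. Sizes are illustrative; this is
# not a drop-in replacement for the class.
def _vector_quantize_demo():
    import torch

    codebook = torch.randn(16, 4)               # 16 codes of dimension 4
    z = torch.randn(5, 4, requires_grad=True)   # already-flattened latents
    # nearest neighbour per row, exactly as torch.cdist + argmin is used above
    indices = torch.argmin(torch.cdist(z, codebook), dim=1)
    z_q = codebook[indices]
    # commitment + codebook terms, mirroring the non-legacy loss branch
    beta = 0.25
    loss = beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
    # straight-through estimator: backward treats the quantization as identity
    z_q = z + (z_q - z).detach()
    return z_q, loss, indices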
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : List[str] , __a : Optional[Any]=False ) -> int:
_UpperCamelCase : Dict = parameters
_UpperCamelCase, _UpperCamelCase : str = torch.chunk(__a , 2 , dim=1 )
_UpperCamelCase : Tuple = torch.clamp(self.logvar , -30.0 , 20.0 )
_UpperCamelCase : Union[str, Any] = deterministic
_UpperCamelCase : Dict = torch.exp(0.5 * self.logvar )
_UpperCamelCase : Any = torch.exp(self.logvar )
if self.deterministic:
_UpperCamelCase : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device and has the same dtype as the parameters
_UpperCamelCase : List[Any] = randn_tensor(
self.mean.shape , generator=__a , device=self.parameters.device , dtype=self.parameters.dtype )
_UpperCamelCase : List[Any] = self.mean + self.std * sample
return x
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[str]=None ) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Tuple , __a : List[str]=[1, 2, 3] ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
_UpperCamelCase : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
return self.mean
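# The class above parameterizes a diagonal Gaussian from one tensor holding
# (mean, logvar). A compact sketch of the reparameterized sample and the KL term
# against a standard normal (shapes illustrative):
def _diagonal_gaussian_demo():
    import torch

    params = torch.randn(2, 8, 4, 4)              # (batch, 2 * latent_channels, h, w)
    mean, logvar = torch.chunk(params, 2, dim=1)
    logvar = torch.clamp(logvar, -30.0, 20.0)
    std = torch.exp(0.5 * logvar)
    sample = mean + std * torch.randn_like(mean)  # reparameterization trick
    var = torch.exp(logvar)
    kl = 0.5 * torch.sum(mean.pow(2) + var - 1.0 - logvar, dim=[1, 2, 3])
    return sample, kl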
| 51
| 1
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE__ :Dict = "CLIPImageProcessor"
SCREAMING_SNAKE_CASE__ :List[str] = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
def __init__( self : Union[str, Any] , __a : Any=None , __a : Dict=None , **__a : Dict ) -> Optional[Any]:
_UpperCamelCase : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
_UpperCamelCase : Dict = kwargs.pop("feature_extractor" )
_UpperCamelCase : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self : Union[str, Any] , __a : Tuple=None , __a : Optional[int]=None , __a : Dict=None , **__a : List[str] ) -> Dict:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
_UpperCamelCase : Optional[int] = self.tokenizer(__a , return_tensors=__a , **__a )
if images is not None:
_UpperCamelCase : str = self.image_processor(__a , return_tensors=__a , **__a )
if text is not None and images is not None:
_UpperCamelCase : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**image_features ) , tensor_type=__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , *__a : Tuple , **__a : Tuple ) -> Any:
return self.tokenizer.batch_decode(*__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , *__a : List[Any] , **__a : Dict ) -> Dict:
return self.tokenizer.decode(*__a , **__a )
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
_UpperCamelCase : Tuple = self.tokenizer.model_input_names
_UpperCamelCase : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
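# In the text-and-images branch above, the processor returns the tokenizer's
# encoding extended with the image processor's pixel values. A toy illustration of
# that merge, with plain dicts standing in for BatchEncoding/BatchFeature:
def _processor_merge_demo():
    text_encoding = {"input_ids": [[0, 42, 2]], "attention_mask": [[1, 1, 1]]}
    image_features = {"pixel_values": [[[0.0]]]}   # placeholder data
    encoding = dict(text_encoding)
    encoding["pixel_values"] = image_features["pixel_values"]
    return encoding  # keys: input_ids, attention_mask, pixel_values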
| 51
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"text": Value("string" )} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"summary": Value("string" )} )
SCREAMING_SNAKE_CASE__ :str = "text"
SCREAMING_SNAKE_CASE__ :str = "summary"
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
| 51
| 1
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowercase__ ( lowercase_ ) -> list[list[float]]:
"""simple docstring"""
_UpperCamelCase : Dict = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(lowercase_ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
_UpperCamelCase : Optional[int] = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creates a copy of the matrix with swapped positions of the elements
_UpperCamelCase : List[Any] = [[0.0, 0.0], [0.0, 0.0]]
_UpperCamelCase, _UpperCamelCase : int = matrix[1][1], matrix[0][0]
_UpperCamelCase, _UpperCamelCase : List[str] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(lowercase_ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
_UpperCamelCase : Optional[int] = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creating cofactor matrix
_UpperCamelCase : Optional[Any] = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
_UpperCamelCase : Optional[Any] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
_UpperCamelCase : Optional[int] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
_UpperCamelCase : str = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
_UpperCamelCase : int = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
_UpperCamelCase : Optional[int] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
_UpperCamelCase : List[str] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
_UpperCamelCase : Any = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
_UpperCamelCase : Union[str, Any] = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
_UpperCamelCase : List[str] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
_UpperCamelCase : str = array(lowercase_ )
for i in range(3 ):
for j in range(3 ):
_UpperCamelCase : List[str] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
_UpperCamelCase : List[str] = array(lowercase_ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(determinant )
# Calculate the inverse of the matrix
return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
| 51
|
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = set()
# edges = list of graph's edges
_UpperCamelCase : Union[str, Any] = get_edges(lowercase_ )
# While there are still edges left, take an arbitrary edge
# (from_node, to_node), add both of its endpoints to chosen_vertices, and then
# remove every edge incident to from_node or to_node
while edges:
_UpperCamelCase, _UpperCamelCase : str = edges.pop()
chosen_vertices.add(from_node )
chosen_vertices.add(to_node )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(edge )
return chosen_vertices
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : List[str] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 51
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCamelCase__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = ["pixel_values"]
def __init__( self : List[Any] , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , **__a : Any , ) -> None:
super().__init__(**__a )
_UpperCamelCase : Any = size if size is not None else {"shortest_edge": 224}
_UpperCamelCase : List[Any] = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : Optional[Any] = crop_size if crop_size is not None else {"height": 256, "width": 256}
_UpperCamelCase : List[Any] = get_size_dict(__a , param_name="crop_size" )
_UpperCamelCase : Optional[Any] = do_resize
_UpperCamelCase : Optional[Any] = size
_UpperCamelCase : str = resample
_UpperCamelCase : Optional[Any] = do_rescale
_UpperCamelCase : Optional[int] = rescale_factor
_UpperCamelCase : List[Any] = do_center_crop
_UpperCamelCase : Union[str, Any] = crop_size
_UpperCamelCase : Any = do_flip_channel_order
def __SCREAMING_SNAKE_CASE ( self : Any , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PIL.Image.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : int , ) -> np.ndarray:
_UpperCamelCase : List[Any] = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' )
_UpperCamelCase : int = get_resize_output_image_size(__a , size=size["shortest_edge"] , default_to_square=__a )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> np.ndarray:
_UpperCamelCase : Union[str, Any] = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : np.ndarray , __a : Union[int, float] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> Tuple:
return rescale(__a , scale=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : int , __a : np.ndarray , __a : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray:
return flip_channel_order(__a , data_format=__a )
def __SCREAMING_SNAKE_CASE ( self : int , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : float = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : List[str] , ) -> PIL.Image.Image:
_UpperCamelCase : Optional[Any] = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase : List[str] = resample if resample is not None else self.resample
_UpperCamelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase : Dict = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
_UpperCamelCase : Union[str, Any] = size if size is not None else self.size
_UpperCamelCase : Optional[int] = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : Optional[int] = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase : List[Any] = get_size_dict(__a , param_name="crop_size" )
_UpperCamelCase : List[Any] = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
_UpperCamelCase : Tuple = [to_numpy_array(__a ) for image in images]
if do_resize:
_UpperCamelCase : List[Any] = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_center_crop:
_UpperCamelCase : Union[str, Any] = [self.center_crop(image=__a , size=__a ) for image in images]
if do_rescale:
_UpperCamelCase : Optional[int] = [self.rescale(image=__a , scale=__a ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
_UpperCamelCase : Optional[int] = [self.flip_channel_order(image=__a ) for image in images]
_UpperCamelCase : Optional[int] = [to_channel_dimension_format(__a , __a ) for image in images]
_UpperCamelCase : List[str] = {"pixel_values": images}
return BatchFeature(data=__a , tensor_type=__a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] , __a : List[Tuple] = None ) -> List[str]:
_UpperCamelCase : Any = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__a ) != len(__a ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(__a ):
_UpperCamelCase : Any = target_sizes.numpy()
_UpperCamelCase : Tuple = []
for idx in range(len(__a ) ):
_UpperCamelCase : Tuple = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=__a )
_UpperCamelCase : Optional[Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__a )
else:
_UpperCamelCase : Optional[int] = logits.argmax(dim=1 )
_UpperCamelCase : Union[str, Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
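# A minimal sketch of the post-processing above: bilinear upsampling of the logits
# to the target size, followed by a per-pixel argmax over the class dimension.
# Shapes are illustrative.
def _segmentation_postprocess_demo():
    import torch

    logits = torch.randn(1, 21, 32, 32)          # (batch, num_labels, h, w)
    resized = torch.nn.functional.interpolate(
        logits, size=(64, 64), mode="bilinear", align_corners=False
    )
    return resized[0].argmax(dim=0)              # (64, 64) map of label ids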
| 51
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["OwlViTFeatureExtractor"]
lowerCamelCase__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
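# A minimal sketch of the lazy-import idea behind _LazyModule, using PEP 562
# module-level __getattr__. The real _LazyModule does considerably more (submodule
# routing, __dir__, pickling support); this shows only the core mechanism, so it is
# left commented out to avoid hijacking this module's attribute lookup.
# import importlib
#
# _lazy_exports = {"math": ["sqrt"]}
#
# def __getattr__(name):
#     for module_name, exports in _lazy_exports.items():
#         if name in exports:
#             return getattr(importlib.import_module(module_name), name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")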
| 51
| 1
|
"""simple docstring"""
from math import pow
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,) -> tuple[int, int]:
"""simple docstring"""
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
_UpperCamelCase : List[Any] = int(pow(lowercase_ ,lowercase_ ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = backtrack(
lowercase_ ,lowercase_ ,current_number + 1 ,lowercase_ ,lowercase_ )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
_UpperCamelCase, _UpperCamelCase : int = backtrack(
lowercase_ ,lowercase_ ,current_number + 1 ,lowercase_ ,lowercase_ )
return current_sum, solutions_count
def lowercase__ ( lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
raise ValueError(
"Invalid input\n"
"needed_sum must be between 1 and 1000, power between 2 and 10." )
return backtrack(lowercase_ ,lowercase_ ,1 ,0 ,0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
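# Usage sketch. In the original source the functions above are backtrack and solve;
# those names are assumed here, since the shared lowercase__ name makes the second
# definition shadow the first.
# solve(13, 2)  # -> 1, because 13 = 2**2 + 3**2
# solve(25, 2)  # -> 2, because 25 = 5**2 and 25 = 3**2 + 4**2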
| 51
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowercase__ ( lowercase_ = 1_000_000 ,lowercase_ = 10 ) -> int:
"""simple docstring"""
_UpperCamelCase : defaultdict = defaultdict(lowercase_ )
for outer_width in range(3 ,(t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_UpperCamelCase : Any = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
else:
_UpperCamelCase : str = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(lowercase_ ,outer_width - 1 ,2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51
| 1
|
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : int = model.config
_UpperCamelCase : Optional[int] = DonutSwinConfig(
image_size=original_config.input_size ,patch_size=4 ,depths=original_config.encoder_layer ,num_heads=[4, 8, 16, 32] ,window_size=original_config.window_size ,embed_dim=128 ,)
_UpperCamelCase : Any = MBartConfig(
is_decoder=lowercase_ ,is_encoder_decoder=lowercase_ ,add_cross_attention=lowercase_ ,decoder_layers=original_config.decoder_layer ,max_position_embeddings=original_config.max_position_embeddings ,vocab_size=len(
model.decoder.tokenizer ) ,scale_embedding=lowercase_ ,add_final_layer_norm=lowercase_ ,)
return encoder_config, decoder_config
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
if "encoder.model" in name:
_UpperCamelCase : str = name.replace("encoder.model" ,"encoder" )
if "decoder.model" in name:
_UpperCamelCase : Dict = name.replace("decoder.model" ,"decoder" )
if "patch_embed.proj" in name:
_UpperCamelCase : List[Any] = name.replace("patch_embed.proj" ,"embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
_UpperCamelCase : str = name.replace("patch_embed.norm" ,"embeddings.norm" )
if name.startswith("encoder" ):
if "layers" in name:
_UpperCamelCase : Union[str, Any] = "encoder." + name
if "attn.proj" in name:
_UpperCamelCase : Any = name.replace("attn.proj" ,"attention.output.dense" )
if "attn" in name and "mask" not in name:
_UpperCamelCase : int = name.replace("attn" ,"attention.self" )
if "norm1" in name:
_UpperCamelCase : int = name.replace("norm1" ,"layernorm_before" )
if "norm2" in name:
_UpperCamelCase : int = name.replace("norm2" ,"layernorm_after" )
if "mlp.fc1" in name:
_UpperCamelCase : List[str] = name.replace("mlp.fc1" ,"intermediate.dense" )
if "mlp.fc2" in name:
_UpperCamelCase : Optional[Any] = name.replace("mlp.fc2" ,"output.dense" )
if name == "encoder.norm.weight":
_UpperCamelCase : Union[str, Any] = "encoder.layernorm.weight"
if name == "encoder.norm.bias":
_UpperCamelCase : Tuple = "encoder.layernorm.bias"
return name
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_UpperCamelCase : Optional[Any] = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
_UpperCamelCase : Tuple = key.split("." )
_UpperCamelCase : Dict = int(key_split[3] )
_UpperCamelCase : Optional[Any] = int(key_split[5] )
_UpperCamelCase : int = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_UpperCamelCase : int = val[:dim, :]
_UpperCamelCase : Tuple = val[dim : dim * 2, :]
_UpperCamelCase : Optional[Any] = val[-dim:, :]
else:
_UpperCamelCase : Union[str, Any] = val[:dim]
_UpperCamelCase : Tuple = val[dim : dim * 2]
_UpperCamelCase : Any = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# the HuggingFace implementation doesn't use the attn_mask buffer
# and the model doesn't use the encoder's final LayerNorm
pass
else:
_UpperCamelCase : Dict = val
return orig_state_dict
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=False ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = DonutModel.from_pretrained(lowercase_ ).eval()
# load HuggingFace model
_UpperCamelCase, _UpperCamelCase : Dict = get_configs(lowercase_ )
_UpperCamelCase : Dict = DonutSwinModel(lowercase_ )
_UpperCamelCase : int = MBartForCausalLM(lowercase_ )
_UpperCamelCase : str = VisionEncoderDecoderModel(encoder=lowercase_ ,decoder=lowercase_ )
model.eval()
_UpperCamelCase : str = original_model.state_dict()
_UpperCamelCase : Union[str, Any] = convert_state_dict(lowercase_ ,lowercase_ )
model.load_state_dict(lowercase_ )
# verify results on scanned document
_UpperCamelCase : Any = load_dataset("hf-internal-testing/example-documents" )
_UpperCamelCase : Any = dataset["test"][0]["image"].convert("RGB" )
_UpperCamelCase : Dict = XLMRobertaTokenizerFast.from_pretrained(lowercase_ ,from_slow=lowercase_ )
_UpperCamelCase : Any = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis ,size=original_model.config.input_size[::-1] )
_UpperCamelCase : int = DonutProcessor(lowercase_ ,lowercase_ )
_UpperCamelCase : List[str] = processor(lowercase_ ,return_tensors="pt" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
_UpperCamelCase : Optional[Any] = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
_UpperCamelCase : str = "When is the coffee break?"
_UpperCamelCase : str = task_prompt.replace("{user_input}" ,lowercase_ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
_UpperCamelCase : int = "<s_rvlcdip>"
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
_UpperCamelCase : Any = "<s_cord>"
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
_UpperCamelCase : Tuple = "s_cord-v2>"
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
_UpperCamelCase : Tuple = "<s_zhtrainticket>"
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
_UpperCamelCase : List[Any] = "hello world"
else:
raise ValueError("Model name not supported" )
_UpperCamelCase : str = original_model.decoder.tokenizer(lowercase_ ,add_special_tokens=lowercase_ ,return_tensors="pt" )[
"input_ids"
]
_UpperCamelCase : List[Any] = original_model.encoder.model.patch_embed(lowercase_ )
_UpperCamelCase, _UpperCamelCase : List[Any] = model.encoder.embeddings(lowercase_ )
assert torch.allclose(lowercase_ ,lowercase_ ,atol=1e-3 )
# verify encoder hidden states
_UpperCamelCase : Any = original_model.encoder(lowercase_ )
_UpperCamelCase : Any = model.encoder(lowercase_ ).last_hidden_state
assert torch.allclose(lowercase_ ,lowercase_ ,atol=1e-2 )
# verify decoder hidden states
_UpperCamelCase : Any = original_model(lowercase_ ,lowercase_ ,lowercase_ ).logits
_UpperCamelCase : List[str] = model(lowercase_ ,decoder_input_ids=lowercase_ ).logits
assert torch.allclose(lowercase_ ,lowercase_ ,atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub("nielsr/" + model_name.split("/" )[-1] ,commit_message="Update model" )
processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] ,commit_message="Update model" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
lowerCamelCase__ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
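# The qkv handling in convert_state_dict above splits one fused (3*dim, dim)
# attention weight into separate q/k/v matrices. The same slicing in isolation
# (sizes illustrative):
def _split_qkv_demo():
    import torch

    dim = 4
    qkv_weight = torch.randn(3 * dim, dim)       # fused as [q; k; v] along dim 0
    q = qkv_weight[:dim, :]
    k = qkv_weight[dim : dim * 2, :]
    v = qkv_weight[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)
    return q, k, v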
| 51
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("KEY")
lowerCamelCase__ = TypeVar("VAL")
@dataclass(frozen=_UpperCamelCase , slots=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( Generic[KEY, VAL] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :KEY
SCREAMING_SNAKE_CASE__ :VAL
class __SCREAMING_SNAKE_CASE ( _Item ):
'''simple docstring'''
def __init__( self : List[str] ) -> None:
super().__init__(__a , __a )
def __bool__( self : Dict ) -> bool:
return False
lowerCamelCase__ = _DeletedItem()
class __SCREAMING_SNAKE_CASE ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self : int , __a : int = 8 , __a : float = 0.75 ) -> None:
_UpperCamelCase : str = initial_block_size
_UpperCamelCase : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_UpperCamelCase : List[str] = capacity_factor
_UpperCamelCase : Dict = 0
def __SCREAMING_SNAKE_CASE ( self : int , __a : KEY ) -> int:
return hash(__a ) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : int ) -> int:
return (ind + 1) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : int , __a : KEY , __a : VAL ) -> bool:
_UpperCamelCase : List[Any] = self._buckets[ind]
if not stored:
_UpperCamelCase : Tuple = _Item(__a , __a )
self._len += 1
return True
elif stored.key == key:
_UpperCamelCase : Union[str, Any] = _Item(__a , __a )
return True
else:
return False
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
_UpperCamelCase : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
_UpperCamelCase : List[str] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : int ) -> None:
_UpperCamelCase : Any = self._buckets
_UpperCamelCase : List[Any] = [None] * new_size
_UpperCamelCase : List[str] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __SCREAMING_SNAKE_CASE ( self : int ) -> None:
self._resize(len(self._buckets ) * 2 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None:
self._resize(len(self._buckets ) // 2 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : KEY ) -> Iterator[int]:
_UpperCamelCase : str = self._get_bucket_index(__a )
for _ in range(len(self._buckets ) ):
yield ind
_UpperCamelCase : Tuple = self._get_next_ind(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : KEY , __a : VAL ) -> None:
for ind in self._iterate_buckets(__a ):
if self._try_set(__a , __a , __a ):
break
def __setitem__( self : int , __a : KEY , __a : VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(__a , __a )
def __delitem__( self : str , __a : KEY ) -> None:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
raise KeyError(__a )
if item is _deleted:
continue
if item.key == key:
_UpperCamelCase : List[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , __a : KEY ) -> VAL:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__a )
def __len__( self : List[Any] ) -> int:
return self._len
def __iter__( self : List[str] ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self : List[str] ) -> str:
_UpperCamelCase : Optional[int] = " ,".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
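# Usage sketch for the open-addressing map above (HashMap in the original source;
# the original class name is assumed because the placeholder names repeat in this file).
# hm = HashMap()
# hm["a"] = 1
# hm["b"] = 2
# del hm["a"]       # leaves a _deleted tombstone so later probes keep walking
# assert hm["b"] == 2 and len(hm) == 1
# hm["b"] = 3       # in-place overwrite via the stored.key == key branch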
| 51
| 1
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase__ = "src/transformers"
lowerCamelCase__ = "docs/source/en"
lowerCamelCase__ = "."
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
with open(lowercase_ ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
_UpperCamelCase : Union[str, Any] = f.readlines()
# Find the start prompt.
_UpperCamelCase : Dict = 0
while not lines[start_index].startswith(lowercase_ ):
start_index += 1
start_index += 1
_UpperCamelCase : Optional[int] = start_index
while not lines[end_index].startswith(lowercase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
lowerCamelCase__ = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase__ = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
lowerCamelCase__ = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Tuple = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" ,lowercase_ )
return [m.group(0 ) for m in matches]
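# The regex above splits on lower->Upper boundaries and on the start of an
# Upper+lower pair, so leading acronyms stay intact. For example (the function is
# camel_case_split in the original source):
# camel_case_split("TFBertModel")  # -> ['TF', 'Bert', 'Model']
# camel_case_split("CamelCase")    # -> ['Camel', 'Case']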
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = 2 if text == "✅" or text == "❌" else len(lowercase_ )
_UpperCamelCase : Union[str, Any] = (width - text_length) // 2
_UpperCamelCase : Dict = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_UpperCamelCase : str = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_UpperCamelCase : Dict = {name: config.replace("Config" ,"" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : str = collections.defaultdict(lowercase_ )
# Let's lookup through all transformers object (once).
for attr_name in dir(lowercase_ ):
_UpperCamelCase : List[str] = None
if attr_name.endswith("Tokenizer" ):
_UpperCamelCase : Tuple = slow_tokenizers
_UpperCamelCase : Any = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
_UpperCamelCase : Optional[Any] = fast_tokenizers
_UpperCamelCase : List[str] = attr_name[:-13]
elif _re_tf_models.match(lowercase_ ) is not None:
_UpperCamelCase : List[Any] = tf_models
_UpperCamelCase : Dict = _re_tf_models.match(lowercase_ ).groups()[0]
elif _re_flax_models.match(lowercase_ ) is not None:
_UpperCamelCase : Dict = flax_models
_UpperCamelCase : Union[str, Any] = _re_flax_models.match(lowercase_ ).groups()[0]
elif _re_pt_models.match(lowercase_ ) is not None:
_UpperCamelCase : Optional[int] = pt_models
_UpperCamelCase : Any = _re_pt_models.match(lowercase_ ).groups()[0]
if lookup_dict is not None:
while len(lowercase_ ) > 0:
if attr_name in model_name_to_prefix.values():
_UpperCamelCase : Dict = True
break
# Try again after removing the last word in the name
_UpperCamelCase : List[str] = "".join(camel_case_split(lowercase_ )[:-1] )
# Let's build that table!
_UpperCamelCase : Any = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_UpperCamelCase : List[str] = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_UpperCamelCase : Union[str, Any] = [len(lowercase_ ) + 2 for c in columns]
_UpperCamelCase : Any = max([len(lowercase_ ) for name in model_names] ) + 2
# Build the table per se
_UpperCamelCase : Tuple = "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for c, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
_UpperCamelCase : Union[str, Any] = {True: "✅", False: "❌"}
for name in model_names:
_UpperCamelCase : Optional[int] = model_name_to_prefix[name]
_UpperCamelCase : Tuple = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for l, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
return table
def lowercase__ ( lowercase_=False ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : str = _find_text_in_file(
filename=os.path.join(lowercase_ ,"index.md" ) ,start_prompt="<!--This table is updated automatically from the auto modules" ,end_prompt="<!-- End table-->" ,)
_UpperCamelCase : Any = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowercase_ ,"index.md" ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCamelCase__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 51
|
"""simple docstring"""
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any] , __a : list[int] ) -> None:
_UpperCamelCase : Tuple = len(__a )
_UpperCamelCase : Dict = [0] * len_array
if len_array > 0:
_UpperCamelCase : Optional[Any] = array[0]
for i in range(1 , __a ):
_UpperCamelCase : Tuple = self.prefix_sum[i - 1] + array[i]
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : int , __a : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int ) -> bool:
_UpperCamelCase : int = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__a )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
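# Usage sketch (the class above is PrefixSum in the original source; the method
# names get_sum / contains_sum are assumed from it).
# ps = PrefixSum([1, 2, 3, 4])   # internal prefix sums: [1, 3, 6, 10]
# ps.get_sum(1, 3)               # -> 9, i.e. 2 + 3 + 4
# ps.contains_sum(7)             # -> True, the subarray [3, 4] sums to 7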
| 51
| 1
|
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
lowerCamelCase__ = HfApi()
lowerCamelCase__ = {}
# fmt: off
lowerCamelCase__ = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
lowerCamelCase__ = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
lowerCamelCase__ = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
lowerCamelCase__ = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
lowerCamelCase__ = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
lowerCamelCase__ = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
lowerCamelCase__ = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
lowerCamelCase__ = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
lowerCamelCase__ = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
lowerCamelCase__ = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
lowerCamelCase__ = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
lowerCamelCase__ = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
lowerCamelCase__ = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
lowerCamelCase__ = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
lowerCamelCase__ = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
lowerCamelCase__ = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
lowerCamelCase__ = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith("CompVis"):
lowerCamelCase__ = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
lowerCamelCase__ = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
lowerCamelCase__ = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
lowerCamelCase__ = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
lowerCamelCase__ = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 51
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def lowercase__ ( lowercase_ ,lowercase_=7 ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[int] = None
if token is not None:
_UpperCamelCase : Optional[Any] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
_UpperCamelCase : Any = "636036"
_UpperCamelCase : Tuple = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
_UpperCamelCase : Dict = requests.get(lowercase_ ,headers=lowercase_ ).json()
return result["workflow_runs"]
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_daily_ci_runs(lowercase_ )
_UpperCamelCase : Tuple = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
_UpperCamelCase : Union[str, Any] = workflow_run["id"]
break
return workflow_run_id
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : str = get_last_daily_ci_runs(lowercase_ )
if workflow_run_id is not None:
_UpperCamelCase : int = get_artifacts_links(worflow_run_id=lowercase_ ,token=lowercase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
_UpperCamelCase : Dict = artifacts_links[artifact_name]
download_artifact(
artifact_name=lowercase_ ,artifact_url=lowercase_ ,output_dir=lowercase_ ,token=lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
get_last_daily_ci_artifacts(lowercase_ ,lowercase_ ,lowercase_ )
_UpperCamelCase : Dict = {}
for artifact_name in artifact_names:
_UpperCamelCase : Union[str, Any] = os.path.join(lowercase_ ,F'''{artifact_name}.zip''' )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : int = {}
with zipfile.ZipFile(lowercase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase_ ):
# read the file
with z.open(lowercase_ ) as f:
_UpperCamelCase : int = f.read().decode("UTF-8" )
return results
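# The zip-reading loop above, in isolation: decode every regular file in an artifact
# archive into a {filename: text} mapping. Checking `filename.endswith("/")` is the
# robust way to skip directory entries inside a zip archive; `artifact.zip` below is
# an illustrative path.
def _read_zip_demo(path="artifact.zip"):
    import zipfile

    results = {}
    with zipfile.ZipFile(path) as z:
        for filename in z.namelist():
            if not filename.endswith("/"):
                with z.open(filename) as f:
                    results[filename] = f.read().decode("UTF-8")
    return results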
| 51
| 1
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowerCamelCase__ = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def lowercase__ ( lowercase_=None ) -> Optional[int]:
"""simple docstring"""
if subparsers is not None:
_UpperCamelCase : str = subparsers.add_parser("tpu-config" ,description=_description )
else:
_UpperCamelCase : str = argparse.ArgumentParser("Accelerate tpu-config command" ,description=_description )
# Core arguments
_UpperCamelCase : Tuple = parser.add_argument_group(
"Config Arguments" ,"Arguments that can be configured through `accelerate config`." )
config_args.add_argument(
"--config_file" ,type=lowercase_ ,default=lowercase_ ,help="Path to the config file to use for accelerate." ,)
config_args.add_argument(
"--tpu_name" ,default=lowercase_ ,help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." ,)
config_args.add_argument(
"--tpu_zone" ,default=lowercase_ ,help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." ,)
_UpperCamelCase : List[Any] = parser.add_argument_group("TPU Arguments" ,"Arguments for options ran inside the TPU." )
pod_args.add_argument(
"--use_alpha" ,action="store_true" ,help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." ,)
pod_args.add_argument(
"--command_file" ,default=lowercase_ ,help="The path to the file containing the commands to run on the pod on startup." ,)
pod_args.add_argument(
"--command" ,action="append" ,nargs="+" ,help="A command to run on the pod. Can be passed multiple times." ,)
pod_args.add_argument(
"--install_accelerate" ,action="store_true" ,help="Whether to install accelerate on the pod. Defaults to False." ,)
pod_args.add_argument(
"--accelerate_version" ,default="latest" ,help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." ,)
pod_args.add_argument(
"--debug" ,action="store_true" ,help="If set, will print the command that would be run instead of running it." )
if subparsers is not None:
parser.set_defaults(func=lowercase_ )
return parser
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : Tuple = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(lowercase_ ):
_UpperCamelCase : Optional[int] = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
_UpperCamelCase : Optional[Any] = defaults.command_file
if not args.command and defaults.commands is not None:
_UpperCamelCase : List[str] = defaults.commands
if not args.tpu_name:
_UpperCamelCase : Optional[Any] = defaults.tpu_name
if not args.tpu_zone:
_UpperCamelCase : Optional[Any] = defaults.tpu_zone
if args.accelerate_version == "dev":
_UpperCamelCase : List[str] = "git+https://github.com/huggingface/accelerate.git"
elif args.accelerate_version == "latest":
_UpperCamelCase : int = "accelerate -U"
elif isinstance(parse(args.accelerate_version ) ,lowercase_ ):
_UpperCamelCase : str = F'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError("You must specify either a command file or a command to run on the pod." )
if args.command_file:
with open(args.command_file ,"r" ) as f:
_UpperCamelCase : int = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] ,lowercase_ ):
_UpperCamelCase : Optional[int] = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
_UpperCamelCase : Optional[Any] = ["cd /usr/share"]
if args.install_accelerate:
new_cmd += [F'''pip install {args.accelerate_version}''']
new_cmd += args.command
_UpperCamelCase : Optional[int] = "; ".join(lowercase_ )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
_UpperCamelCase : Any = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'''Running {' '.join(lowercase_ )}''' )
return
subprocess.run(lowercase_ )
print("Successfully setup pod." )
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : str = tpu_command_parser()
_UpperCamelCase : str = parser.parse_args()
tpu_command_launcher(lowercase_ )
| 51
|
"""simple docstring"""
import math
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Any , __a : list[list[float]] , __a : list[int] ) -> int:
_UpperCamelCase : List[Any] = 0.0
_UpperCamelCase : Union[str, Any] = 0.0
for i in range(len(__a ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
db += math.pow((sample[i] - weights[1][i]) , 2 )
# the winner is the weight vector with the smaller squared distance to the sample
return 0 if da > db else 1
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : list[list[int | float]] , __a : list[int] , __a : int , __a : float ) -> list[list[int | float]]:
for i in range(len(__a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def lowercase__ ( ) -> None:
"""simple docstring"""
_UpperCamelCase : Optional[int] = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_UpperCamelCase : List[str] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_UpperCamelCase : List[Any] = SelfOrganizingMap()
_UpperCamelCase : int = 3
_UpperCamelCase : List[Any] = 0.5
for _ in range(lowercase_ ):
for j in range(len(lowercase_ ) ):
# training sample
_UpperCamelCase : int = training_samples[j]
# Compute the winning vector
_UpperCamelCase : Tuple = self_organizing_map.get_winner(lowercase_ ,lowercase_ )
# Update the winning vector
_UpperCamelCase : int = self_organizing_map.update(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
# classify test sample
_UpperCamelCase : Optional[int] = [0, 0, 0, 1]
_UpperCamelCase : Dict = self_organizing_map.get_winner(lowercase_ ,lowercase_ )
# results
print(F'''Clusters that the test sample belongs to : {winner}''' )
print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
| 51
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=_UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = ["torch", "transformers", "onnx"]
def __init__( self : Optional[int] , *__a : List[str] , **__a : str ) -> int:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[int] , *__a : Dict , **__a : Dict ) -> Optional[Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Any , *__a : Any , **__a : List[str] ) -> List[Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class __SCREAMING_SNAKE_CASE ( metaclass=_UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = ["torch", "transformers", "onnx"]
def __init__( self : Tuple , *__a : List[str] , **__a : List[str] ) -> Union[str, Any]:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : List[Any] , *__a : List[Any] , **__a : Any ) -> List[Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[int] , *__a : str , **__a : Any ) -> Union[str, Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class __SCREAMING_SNAKE_CASE ( metaclass=_UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = ["torch", "transformers", "onnx"]
def __init__( self : Optional[Any] , *__a : int , **__a : int ) -> int:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *__a : Tuple , **__a : Union[str, Any] ) -> int:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Dict , *__a : Union[str, Any] , **__a : List[str] ) -> Optional[int]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class __SCREAMING_SNAKE_CASE ( metaclass=_UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = ["torch", "transformers", "onnx"]
def __init__( self : Union[str, Any] , *__a : List[Any] , **__a : Optional[Any] ) -> Optional[Any]:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : List[Any] , *__a : Tuple , **__a : Optional[Any] ) -> str:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : str , *__a : Optional[Any] , **__a : str ) -> Union[str, Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class __SCREAMING_SNAKE_CASE ( metaclass=_UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = ["torch", "transformers", "onnx"]
def __init__( self : Optional[int] , *__a : Optional[int] , **__a : Dict ) -> Tuple:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Tuple , *__a : Optional[Any] , **__a : List[str] ) -> List[str]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *__a : Optional[Any] , **__a : Tuple ) -> Optional[int]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class __SCREAMING_SNAKE_CASE ( metaclass=_UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = ["torch", "transformers", "onnx"]
def __init__( self : str , *__a : List[str] , **__a : Union[str, Any] ) -> List[Any]:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , *__a : Optional[Any] , **__a : List[Any] ) -> Tuple:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[int] , *__a : Optional[Any] , **__a : Optional[int] ) -> str:
requires_backends(cls , ["torch", "transformers", "onnx"] )
| 51
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase__ = "src/transformers"
lowerCamelCase__ = "docs/source/en"
lowerCamelCase__ = "."
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
with open(lowercase_ ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
_UpperCamelCase : Union[str, Any] = f.readlines()
# Find the start prompt.
_UpperCamelCase : Dict = 0
while not lines[start_index].startswith(lowercase_ ):
start_index += 1
start_index += 1
_UpperCamelCase : Optional[int] = start_index
while not lines[end_index].startswith(lowercase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
lowerCamelCase__ = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase__ = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
lowerCamelCase__ = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Tuple = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" ,lowercase_ )
return [m.group(0 ) for m in matches]
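# Example of the split above: camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]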
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = 2 if text == "✅" or text == "❌" else len(lowercase_ )
_UpperCamelCase : Union[str, Any] = (width - text_length) // 2
_UpperCamelCase : Dict = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_UpperCamelCase : str = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_UpperCamelCase : Dict = {name: config.replace("Config" ,"" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : str = collections.defaultdict(lowercase_ )
# Let's look through all transformers objects (once).
for attr_name in dir(lowercase_ ):
_UpperCamelCase : List[str] = None
if attr_name.endswith("Tokenizer" ):
_UpperCamelCase : Tuple = slow_tokenizers
_UpperCamelCase : Any = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
_UpperCamelCase : Optional[Any] = fast_tokenizers
_UpperCamelCase : List[str] = attr_name[:-13]
elif _re_tf_models.match(lowercase_ ) is not None:
_UpperCamelCase : List[Any] = tf_models
_UpperCamelCase : Dict = _re_tf_models.match(lowercase_ ).groups()[0]
elif _re_flax_models.match(lowercase_ ) is not None:
_UpperCamelCase : Dict = flax_models
_UpperCamelCase : Union[str, Any] = _re_flax_models.match(lowercase_ ).groups()[0]
elif _re_pt_models.match(lowercase_ ) is not None:
_UpperCamelCase : Optional[int] = pt_models
_UpperCamelCase : Any = _re_pt_models.match(lowercase_ ).groups()[0]
if lookup_dict is not None:
while len(lowercase_ ) > 0:
if attr_name in model_name_to_prefix.values():
_UpperCamelCase : Dict = True
break
# Try again after removing the last word in the name
_UpperCamelCase : List[str] = "".join(camel_case_split(lowercase_ )[:-1] )
# Let's build that table!
_UpperCamelCase : Any = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_UpperCamelCase : List[str] = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_UpperCamelCase : Union[str, Any] = [len(lowercase_ ) + 2 for c in columns]
_UpperCamelCase : Any = max([len(lowercase_ ) for name in model_names] ) + 2
# Build the table per se
_UpperCamelCase : Tuple = "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for c, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
_UpperCamelCase : Union[str, Any] = {True: "✅", False: "❌"}
for name in model_names:
_UpperCamelCase : Optional[int] = model_name_to_prefix[name]
_UpperCamelCase : Tuple = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for l, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
return table
def lowercase__ ( lowercase_=False ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : str = _find_text_in_file(
filename=os.path.join(lowercase_ ,"index.md" ) ,start_prompt="<!--This table is updated automatically from the auto modules" ,end_prompt="<!-- End table-->" ,)
_UpperCamelCase : Any = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowercase_ ,"index.md" ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCamelCase__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 51
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCamelCase__ = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory intensive.
lowerCamelCase__ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, float]:
"""simple docstring"""
_UpperCamelCase : str = len([g for position, g in enumerate(lowercase_ ) if g == main_target[position]] )
return (item, float(lowercase_ ))
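# Example of the scoring above: evaluate("abcd", "abce") -> ("abcd", 3.0), since
# three positions of the item match the target.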
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, str]:
"""simple docstring"""
_UpperCamelCase : Tuple = random.randint(0 ,len(lowercase_ ) - 1 )
_UpperCamelCase : Dict = parent_a[:random_slice] + parent_b[random_slice:]
_UpperCamelCase : Tuple = parent_b[:random_slice] + parent_a[random_slice:]
return (child_a, child_b)
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : int = list(lowercase_ )
if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
_UpperCamelCase : int = random.choice(lowercase_ )
return "".join(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,) -> list[str]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = []
# Generate more children proportionally to the fitness score.
_UpperCamelCase : List[str] = int(parent_a[1] * 100 ) + 1
_UpperCamelCase : Union[str, Any] = 10 if child_n >= 10 else child_n
for _ in range(lowercase_ ):
_UpperCamelCase : Dict = population_score[random.randint(0 ,lowercase_ )][0]
_UpperCamelCase, _UpperCamelCase : Dict = crossover(parent_a[0] ,lowercase_ )
# Append new string to the population list.
pop.append(mutate(lowercase_ ,lowercase_ ) )
pop.append(mutate(lowercase_ ,lowercase_ ) )
return pop
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = True ) -> tuple[int, int, str]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
_UpperCamelCase : List[str] = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(lowercase_ )
# Verify that the target contains no genes besides the ones inside genes variable.
_UpperCamelCase : int = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_UpperCamelCase : int = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(lowercase_ )
# Generate random starting population.
_UpperCamelCase : Union[str, Any] = []
for _ in range(lowercase_ ):
population.append("".join([random.choice(lowercase_ ) for i in range(len(lowercase_ ) )] ) )
# Just some logs to know what the algorithm is doing.
_UpperCamelCase, _UpperCamelCase : str = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowercase_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_UpperCamelCase : int = [evaluate(lowercase_ ,lowercase_ ) for item in population]
# Check if there is a matching evolution.
_UpperCamelCase : Optional[Any] = sorted(lowercase_ ,key=lambda x : x[1] ,reverse=lowercase_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping them avoids regressions in the evolution.
_UpperCamelCase : str = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowercase_ )
# Normalize population score to be between 0 and 1.
_UpperCamelCase : str = [
(item, score / len(lowercase_ )) for item, score in population_score
]
# This is selection
for i in range(lowercase_ ):
population.extend(select(population_score[int(lowercase_ )] ,lowercase_ ,lowercase_ ) )
# Check if the population has already reached its maximum size and, if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also solve small strings in
# far fewer generations.
if len(lowercase_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 51
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"text": Value("string" )} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"summary": Value("string" )} )
SCREAMING_SNAKE_CASE__ :str = "text"
SCREAMING_SNAKE_CASE__ :str = "summary"
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
| 51
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = ["model.decoder.embed_positions.weights"]
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
if "emb" in name:
_UpperCamelCase : List[str] = name.replace("emb" ,"model.decoder.embed_tokens" )
if "transformer" in name:
_UpperCamelCase : Optional[int] = name.replace("transformer" ,"model.decoder" )
if "cross_attention" in name:
_UpperCamelCase : Optional[int] = name.replace("cross_attention" ,"encoder_attn" )
if "linear1" in name:
_UpperCamelCase : Optional[Any] = name.replace("linear1" ,"fc1" )
if "linear2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("linear2" ,"fc2" )
if "norm1" in name:
_UpperCamelCase : Optional[Any] = name.replace("norm1" ,"self_attn_layer_norm" )
if "norm_cross" in name:
_UpperCamelCase : Dict = name.replace("norm_cross" ,"encoder_attn_layer_norm" )
if "norm2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("norm2" ,"final_layer_norm" )
if "out_norm" in name:
_UpperCamelCase : Union[str, Any] = name.replace("out_norm" ,"model.decoder.layer_norm" )
if "linears" in name:
_UpperCamelCase : List[str] = name.replace("linears" ,"lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
_UpperCamelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" ,"enc_to_dec_proj" )
return name
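# Example of the renaming above (hypothetical key):
#   rename_keys("transformer.layers.0.linear1.weight")
#   -> "model.decoder.layers.0.fc1.weight"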
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple[Dict, Dict]:
"""simple docstring"""
_UpperCamelCase : str = list(state_dict.keys() )
_UpperCamelCase : Optional[Any] = {}
for key in keys:
_UpperCamelCase : Optional[int] = state_dict.pop(lowercase_ )
_UpperCamelCase : List[Any] = rename_keys(lowercase_ )
if "in_proj_weight" in key:
# split fused qkv proj
_UpperCamelCase : Tuple = val[:hidden_size, :]
_UpperCamelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
_UpperCamelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_UpperCamelCase : Optional[Any] = val
else:
_UpperCamelCase : List[str] = val
return state_dict, enc_dec_proj_state_dict
def lowercase__ ( lowercase_ ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
_UpperCamelCase : List[Any] = 1_024
_UpperCamelCase : List[str] = 24
_UpperCamelCase : Any = 16
elif checkpoint == "medium":
_UpperCamelCase : Tuple = 1_536
_UpperCamelCase : Dict = 48
_UpperCamelCase : Tuple = 24
elif checkpoint == "large":
_UpperCamelCase : int = 2_048
_UpperCamelCase : Optional[int] = 48
_UpperCamelCase : Dict = 32
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
_UpperCamelCase : str = MusicgenDecoderConfig(
hidden_size=lowercase_ ,ffn_dim=hidden_size * 4 ,num_hidden_layers=lowercase_ ,num_attention_heads=lowercase_ ,)
return config
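# Example: decoder_config_from_checkpoint("medium") yields hidden_size=1536,
# ffn_dim=6144, 48 hidden layers and 24 attention heads.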
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_="cpu" ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : str = MusicGen.get_pretrained(lowercase_ ,device=lowercase_ )
_UpperCamelCase : Union[str, Any] = decoder_config_from_checkpoint(lowercase_ )
_UpperCamelCase : Optional[int] = fairseq_model.lm.state_dict()
_UpperCamelCase, _UpperCamelCase : Optional[Any] = rename_state_dict(
lowercase_ ,hidden_size=decoder_config.hidden_size )
_UpperCamelCase : Tuple = TaEncoderModel.from_pretrained("t5-base" )
_UpperCamelCase : Union[str, Any] = EncodecModel.from_pretrained("facebook/encodec_32khz" )
_UpperCamelCase : str = MusicgenForCausalLM(lowercase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_UpperCamelCase, _UpperCamelCase : str = decoder.load_state_dict(lowercase_ ,strict=lowercase_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase_ )
if len(lowercase_ ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(lowercase_ ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
_UpperCamelCase : str = MusicgenForConditionalGeneration(text_encoder=lowercase_ ,audio_encoder=lowercase_ ,decoder=lowercase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase_ )
# check we can do a forward pass
_UpperCamelCase : List[str] = torch.arange(0 ,8 ,dtype=torch.long ).reshape(2 ,-1 )
_UpperCamelCase : Dict = input_ids.reshape(2 * 4 ,-1 )
with torch.no_grad():
_UpperCamelCase : Tuple = model(input_ids=lowercase_ ,decoder_input_ids=lowercase_ ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
_UpperCamelCase : int = AutoTokenizer.from_pretrained("t5-base" )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" ,padding_side="left" )
_UpperCamelCase : Optional[int] = MusicgenProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
# set the appropriate bos/pad token ids
_UpperCamelCase : str = 2_048
_UpperCamelCase : str = 2_048
# set other default generation config params
_UpperCamelCase : Optional[Any] = int(30 * audio_encoder.config.frame_rate )
_UpperCamelCase : List[str] = True
_UpperCamelCase : int = 3.0
if pytorch_dump_folder is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(lowercase_ )
processor.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowerCamelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 51
| 1
|
"""simple docstring"""
lowerCamelCase__ = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> list[str]:
"""simple docstring"""
_UpperCamelCase : List[Any] = set()
# keep track of all the paths to be checked
_UpperCamelCase : Tuple = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
_UpperCamelCase : Tuple = queue.pop(0 )
# get the last node from the path
_UpperCamelCase : Dict = path[-1]
if node not in explored:
_UpperCamelCase : int = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
_UpperCamelCase : Optional[int] = list(lowercase_ )
new_path.append(lowercase_ )
queue.append(lowercase_ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(lowercase_ )
# in case there's no path between the 2 nodes
return []
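# Example with the demo graph above: bfs_shortest_path(demo_graph, "G", "F")
# returns ["G", "C", "F"].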
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
_UpperCamelCase : int = [start]
_UpperCamelCase : Tuple = set(lowercase_ )
# Keep tab on distances from `start` node.
_UpperCamelCase : str = {start: 0, target: -1}
while queue:
_UpperCamelCase : Union[str, Any] = queue.pop(0 )
if node == target:
_UpperCamelCase : Optional[Any] = (
dist[node] if dist[target] == -1 else min(dist[target] ,dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(lowercase_ )
queue.append(lowercase_ )
_UpperCamelCase : Tuple = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
| 51
|
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
lowerCamelCase__ = input("Enter image url: ").strip()
print(f"""Downloading image from {url} ...""")
lowerCamelCase__ = BeautifulSoup(requests.get(url).content, "html.parser")
# The image URL is in the content field of the first meta tag with property og:image
lowerCamelCase__ = soup.find("meta", {"property": "og:image"})["content"]
lowerCamelCase__ = requests.get(image_url).content
lowerCamelCase__ = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
| 51
| 1
|
"""simple docstring"""
def lowercase__ ( lowercase_ ,lowercase_ ) -> None:
"""simple docstring"""
_UpperCamelCase : List[Any] = len(lowercase_ )
print("The following activities are selected:" )
# The first activity is always selected
_UpperCamelCase : List[Any] = 0
print(lowercase_ ,end="," )
# Consider rest of the activities
for j in range(lowercase_ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(lowercase_ ,end="," )
_UpperCamelCase : Optional[Any] = j
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = [1, 3, 0, 5, 8, 5]
lowerCamelCase__ = [2, 4, 6, 7, 9, 9]
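# With these sample times, the greedy pass selects activities 0, 1, 3 and 4.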
print_max_activities(start, finish)
| 51
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module imports work (when the python process is running from the root of the repo)
sys.path.append(".")
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Any = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
F'''{test_file} instead.''' )
_UpperCamelCase : str = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
_UpperCamelCase : Dict = components[:-1] + [test_fn.replace(".py" ,"" )]
_UpperCamelCase : List[str] = ".".join(lowercase_ )
return test_module_path
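# Example (POSIX path separator):
#   get_module_path("tests/models/bert/test_modeling_bert.py")
#   -> "tests.models.bert.test_modeling_bert"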
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = get_module_path(lowercase_ )
_UpperCamelCase : str = importlib.import_module(lowercase_ )
return test_module
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : List[Any] = get_test_module(lowercase_ )
for attr in dir(lowercase_ ):
if attr.endswith("ModelTester" ):
tester_classes.append(getattr(lowercase_ ,lowercase_ ) )
# sort with class names
return sorted(lowercase_ ,key=lambda x : x.__name__ )
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : Any = get_test_module(lowercase_ )
for attr in dir(lowercase_ ):
_UpperCamelCase : int = getattr(lowercase_ ,lowercase_ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
_UpperCamelCase : Optional[Any] = getattr(lowercase_ ,"all_model_classes" ,[] )
if len(lowercase_ ) > 0:
test_classes.append(lowercase_ )
# sort with class names
return sorted(lowercase_ ,key=lambda x : x.__name__ )
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Dict = get_test_classes(lowercase_ )
_UpperCamelCase : int = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(lowercase_ ,key=lambda x : x.__name__ )
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = test_class()
if hasattr(lowercase_ ,"setUp" ):
test.setUp()
_UpperCamelCase : Tuple = None
if hasattr(lowercase_ ,"model_tester" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
_UpperCamelCase : Tuple = test.model_tester.__class__
return model_tester
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = get_test_classes(lowercase_ )
_UpperCamelCase : Dict = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(lowercase_ )
# sort with class names
return sorted(lowercase_ ,key=lambda x : x.__name__ )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Any = get_test_classes_for_model(lowercase_ ,lowercase_ )
_UpperCamelCase : List[Any] = []
for test_class in test_classes:
_UpperCamelCase : List[Any] = get_model_tester_from_test_class(lowercase_ )
if tester_class is not None:
tester_classes.append(lowercase_ )
# sort with class names
return sorted(lowercase_ ,key=lambda x : x.__name__ )
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Any = get_test_classes(lowercase_ )
_UpperCamelCase : Tuple = {test_class: get_model_tester_from_test_class(lowercase_ ) for test_class in test_classes}
return test_tester_mapping
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_model_classes(lowercase_ )
_UpperCamelCase : Optional[int] = {
model_class: get_test_classes_for_model(lowercase_ ,lowercase_ ) for model_class in model_classes
}
return model_test_mapping
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = get_model_classes(lowercase_ )
_UpperCamelCase : Tuple = {
model_class: get_tester_classes_for_model(lowercase_ ,lowercase_ ) for model_class in model_classes
}
return model_to_tester_mapping
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
if isinstance(lowercase_ ,lowercase_ ):
return o
elif isinstance(lowercase_ ,lowercase_ ):
return o.__name__
elif isinstance(lowercase_ ,(list, tuple) ):
return [to_json(lowercase_ ) for x in o]
elif isinstance(lowercase_ ,lowercase_ ):
return {to_json(lowercase_ ): to_json(lowercase_ ) for k, v in o.items()}
else:
return o
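# Illustrative example of the original intent (hypothetical class names):
#   to_json({SomeModel: [SomeModelTest]}) -> {"SomeModel": ["SomeModelTest"]},
# since classes are serialized via their __name__.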
| 51
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :torch.FloatTensor
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
@register_to_config
def __init__( self : Optional[Any] , __a : int = 16 , __a : int = 88 , __a : Optional[int] = None , __a : Optional[int] = None , __a : int = 1 , __a : float = 0.0 , __a : int = 32 , __a : Optional[int] = None , __a : bool = False , __a : Optional[int] = None , __a : str = "geglu" , __a : bool = True , __a : bool = True , ) -> str:
super().__init__()
_UpperCamelCase : List[str] = num_attention_heads
_UpperCamelCase : List[str] = attention_head_dim
_UpperCamelCase : Optional[int] = num_attention_heads * attention_head_dim
_UpperCamelCase : List[str] = in_channels
_UpperCamelCase : str = torch.nn.GroupNorm(num_groups=__a , num_channels=__a , eps=1e-6 , affine=__a )
_UpperCamelCase : List[str] = nn.Linear(__a , __a )
# 3. Define transformers blocks
_UpperCamelCase : str = nn.ModuleList(
[
BasicTransformerBlock(
__a , __a , __a , dropout=__a , cross_attention_dim=__a , activation_fn=__a , attention_bias=__a , double_self_attention=__a , norm_elementwise_affine=__a , )
for d in range(__a )
] )
_UpperCamelCase : List[Any] = nn.Linear(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Optional[int] , __a : str=None , __a : Tuple=None , __a : List[str]=None , __a : List[str]=1 , __a : Union[str, Any]=None , __a : bool = True , ) -> Dict:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = hidden_states.shape
_UpperCamelCase : Optional[Any] = batch_frames // num_frames
_UpperCamelCase : int = hidden_states
_UpperCamelCase : Any = hidden_states[None, :].reshape(__a , __a , __a , __a , __a )
_UpperCamelCase : List[Any] = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
_UpperCamelCase : str = self.norm(__a )
_UpperCamelCase : Union[str, Any] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , __a , __a )
_UpperCamelCase : Optional[Any] = self.proj_in(__a )
# 2. Blocks
for block in self.transformer_blocks:
_UpperCamelCase : int = block(
__a , encoder_hidden_states=__a , timestep=__a , cross_attention_kwargs=__a , class_labels=__a , )
# 3. Output
_UpperCamelCase : Any = self.proj_out(__a )
_UpperCamelCase : Optional[Any] = (
hidden_states[None, None, :]
.reshape(__a , __a , __a , __a , __a )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
_UpperCamelCase : Any = hidden_states.reshape(__a , __a , __a , __a )
_UpperCamelCase : Optional[int] = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=__a )
| 51
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mask2former import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
Mask2FormerForUniversalSegmentation,
Mask2FormerModel,
Mask2FormerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 51
| 1
|
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowercase__ ( ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : int = ArgumentParser("Transformers CLI tool" ,usage="transformers-cli <command> [<args>]" )
_UpperCamelCase : List[Any] = parser.add_subparsers(help="transformers-cli command helpers" )
# Register commands
ConvertCommand.register_subcommand(lowercase_ )
DownloadCommand.register_subcommand(lowercase_ )
EnvironmentCommand.register_subcommand(lowercase_ )
RunCommand.register_subcommand(lowercase_ )
ServeCommand.register_subcommand(lowercase_ )
UserCommands.register_subcommand(lowercase_ )
AddNewModelCommand.register_subcommand(lowercase_ )
AddNewModelLikeCommand.register_subcommand(lowercase_ )
LfsCommands.register_subcommand(lowercase_ )
PTtoTFCommand.register_subcommand(lowercase_ )
# Let's go
_UpperCamelCase : Optional[Any] = parser.parse_args()
if not hasattr(lowercase_ ,"func" ):
parser.print_help()
exit(1 )
# Run
_UpperCamelCase : str = args.func(lowercase_ )
service.run()
if __name__ == "__main__":
main()
| 51
|
"""simple docstring"""
lowerCamelCase__ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 51
| 1
|
"""simple docstring"""
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[int] , __a : Optional[int] ) -> Union[str, Any]:
_UpperCamelCase : List[Any] = val
_UpperCamelCase : str = None
_UpperCamelCase : int = None
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Dict ) -> Optional[int]:
if self.val:
if val < self.val:
if self.left is None:
_UpperCamelCase : Tuple = Node(__a )
else:
self.left.insert(__a )
elif val > self.val:
if self.right is None:
_UpperCamelCase : List[str] = Node(__a )
else:
self.right.insert(__a )
else:
_UpperCamelCase : str = val
def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
if root:
inorder(root.left ,lowercase_ )
res.append(root.val )
inorder(root.right ,lowercase_ )
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
if len(lowercase_ ) == 0:
return arr
_UpperCamelCase : Optional[int] = Node(arr[0] )
for i in range(1 ,len(lowercase_ ) ):
root.insert(arr[i] )
# Traverse BST in order.
_UpperCamelCase : List[Any] = []
inorder(lowercase_ ,lowercase_ )
return res
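# Example: tree_sort([3, 1, 2]) builds a BST rooted at 3 and returns [1, 2, 3].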
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 51
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
_UpperCamelCase : Tuple = tempfile.mkdtemp()
_UpperCamelCase : str = 5
# Realm tok
_UpperCamelCase : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
_UpperCamelCase : Optional[Any] = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase : Optional[Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
_UpperCamelCase : Any = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
_UpperCamelCase : int = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
] , dtype=__a , )
return block_records
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
_UpperCamelCase : List[str] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
_UpperCamelCase : Tuple = self.get_config()
_UpperCamelCase : int = self.get_dummy_retriever()
_UpperCamelCase : Tuple = retriever.tokenizer
_UpperCamelCase : List[str] = np.array([0, 3] , dtype="long" )
_UpperCamelCase : Union[str, Any] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : List[str] = tokenizer(
["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : str = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase : Any = self.get_config()
_UpperCamelCase : Dict = self.get_dummy_retriever()
_UpperCamelCase : Dict = retriever.tokenizer
_UpperCamelCase : List[Any] = np.array([0, 3, 5] , dtype="long" )
_UpperCamelCase : Optional[int] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : str = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : Union[str, Any] = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual([False, True, True] , __a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
_UpperCamelCase : int = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
_UpperCamelCase : List[Any] = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
_UpperCamelCase : int = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
| 51
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
torch.manual_seed(0 )
_UpperCamelCase : Dict = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
_UpperCamelCase : int = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCamelCase : Any = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
_UpperCamelCase : List[Any] = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
_UpperCamelCase : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Union[str, Any] = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_UpperCamelCase : Union[str, Any] = DDPMScheduler()
_UpperCamelCase : int = AudioDiffusionPipeline(vqvae=__a , unet=self.dummy_unet , mel=__a , scheduler=__a )
_UpperCamelCase : Optional[int] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
_UpperCamelCase : List[str] = torch.Generator(device=__a ).manual_seed(42 )
_UpperCamelCase : List[str] = pipe(generator=__a , steps=4 )
_UpperCamelCase : int = output.audios[0]
_UpperCamelCase : Tuple = output.images[0]
_UpperCamelCase : str = torch.Generator(device=__a ).manual_seed(42 )
_UpperCamelCase : str = pipe(generator=__a , steps=4 , return_dict=__a )
_UpperCamelCase : Optional[int] = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_UpperCamelCase : Tuple = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
_UpperCamelCase : Union[str, Any] = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
_UpperCamelCase : List[Any] = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : Optional[Any] = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_UpperCamelCase : Tuple = DDIMScheduler()
_UpperCamelCase : Any = self.dummy_vqvae_and_unet
_UpperCamelCase : str = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__a , scheduler=__a )
_UpperCamelCase : Union[str, Any] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
np.random.seed(0 )
_UpperCamelCase : Optional[Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_UpperCamelCase : Dict = torch.Generator(device=__a ).manual_seed(42 )
_UpperCamelCase : Dict = pipe(raw_audio=__a , generator=__a , start_step=5 , steps=10 )
_UpperCamelCase : Optional[int] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_UpperCamelCase : int = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
_UpperCamelCase : int = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : int = self.dummy_unet_condition
_UpperCamelCase : Optional[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=__a , mel=__a , scheduler=__a )
_UpperCamelCase : str = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
np.random.seed(0 )
_UpperCamelCase : Optional[int] = torch.rand((1, 1, 10) )
_UpperCamelCase : Any = pipe(generator=__a , encoding=__a )
_UpperCamelCase : int = output.images[0]
_UpperCamelCase : str = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
_UpperCamelCase : Dict = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
_UpperCamelCase : Any = torch_device
_UpperCamelCase : Optional[Any] = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
_UpperCamelCase : Any = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
_UpperCamelCase : Any = torch.Generator(device=__a ).manual_seed(42 )
_UpperCamelCase : List[str] = pipe(generator=__a )
_UpperCamelCase : Any = output.audios[0]
_UpperCamelCase : Optional[int] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_UpperCamelCase : List[Any] = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
_UpperCamelCase : int = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 51
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = LEDConfig
SCREAMING_SNAKE_CASE__ :str = {}
SCREAMING_SNAKE_CASE__ :List[str] = "gelu"
def __init__( self : List[Any] , __a : Union[str, Any] , __a : List[Any]=13 , __a : int=7 , __a : str=True , __a : Any=False , __a : str=99 , __a : str=32 , __a : Union[str, Any]=2 , __a : Optional[Any]=4 , __a : List[Any]=37 , __a : List[Any]=0.1 , __a : Tuple=0.1 , __a : Dict=20 , __a : str=2 , __a : Dict=1 , __a : Any=0 , __a : List[Any]=4 , ) -> List[Any]:
_UpperCamelCase : Optional[Any] = parent
_UpperCamelCase : List[str] = batch_size
_UpperCamelCase : str = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : int = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : str = max_position_embeddings
_UpperCamelCase : int = eos_token_id
_UpperCamelCase : Dict = pad_token_id
_UpperCamelCase : Optional[Any] = bos_token_id
_UpperCamelCase : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_UpperCamelCase : List[str] = self.attention_window + 2
# because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_UpperCamelCase : int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCamelCase : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCamelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_UpperCamelCase : Dict = prepare_led_inputs_dict(__a , __a , __a )
_UpperCamelCase : Union[str, Any] = tf.concat(
[tf.zeros_like(__a )[:, :-1], tf.ones_like(__a )[:, -1:]] , axis=-1 , )
_UpperCamelCase : Union[str, Any] = global_attention_mask
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : int ) -> Tuple:
_UpperCamelCase : Tuple = TFLEDModel(config=__a ).get_decoder()
_UpperCamelCase : Tuple = inputs_dict["input_ids"]
_UpperCamelCase : int = input_ids[:1, :]
_UpperCamelCase : List[str] = inputs_dict["attention_mask"][:1, :]
_UpperCamelCase : List[Any] = 1
# first forward pass
_UpperCamelCase : Any = model(__a , attention_mask=__a , use_cache=__a )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCamelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _UpperCamelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
_UpperCamelCase : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCamelCase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCamelCase : Tuple = model(__a , attention_mask=__a )[0]
_UpperCamelCase : int = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCamelCase : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
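# Hedged usage sketch (tensor shapes are illustrative, not from this file):
# with only `input_ids` and `decoder_input_ids` supplied, the helper above
# derives attention masks from `config.pad_token_id` and all-ones head masks.
#   inputs = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
#   outputs = TFLEDModel(config)(**inputs)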
@require_tf
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Tuple = True
SCREAMING_SNAKE_CASE__ :str = False
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
        self.model_tester = TFLEDModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LEDConfig )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase, _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[int] = tf.zeros_like(inputs_dict["attention_mask"] )
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : str = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_UpperCamelCase : Dict = True
_UpperCamelCase : str = self.model_tester.seq_length
_UpperCamelCase : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__a : Optional[int] ):
_UpperCamelCase : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__a : Optional[Any] ):
_UpperCamelCase : Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions]
_UpperCamelCase : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = True
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Any = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
_UpperCamelCase : Optional[Any] = model_class(__a )
_UpperCamelCase : List[Any] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCamelCase : int = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : str = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
_UpperCamelCase : Any = True
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
        # TODO: Head-masking not yet implemented
pass
def _long_tensor(tok_lst ):
    """simple docstring"""
    return tf.constant(tok_lst ,dtype=tf.int32 )
lowerCamelCase__ = 1E-4
@slow
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Any = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_UpperCamelCase : int = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Tuple = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Optional[int] = model(**__a )[0]
_UpperCamelCase : Optional[int] = (1, 1024, 768)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Tuple = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_UpperCamelCase : Optional[int] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : List[str] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Union[str, Any] = model(**__a )[0]
_UpperCamelCase : int = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Optional[int] = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 , rtol=1e-3 )
"""simple docstring"""
def solution(length: int = 50 ) -> int:
    """Count the ways a row of the given length can be filled with red blocks
    of length >= 3, separated by at least one grey square (Project Euler 114)."""
    ways_number = [1] * (length + 1)

    for row_length in range(3 ,length + 1 ):
        for block_length in range(3 ,row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = RoCBertTokenizer
SCREAMING_SNAKE_CASE__ :Dict = None
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = True
SCREAMING_SNAKE_CASE__ :Union[str, Any] = filter_non_english
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
super().setUp()
_UpperCamelCase : Any = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
_UpperCamelCase : List[str] = {}
_UpperCamelCase : Tuple = {}
for i, value in enumerate(__a ):
_UpperCamelCase : List[str] = i
_UpperCamelCase : Optional[Any] = i
_UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
_UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(__a , __a , ensure_ascii=__a )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(__a , __a , ensure_ascii=__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        tokens = tokenizer.tokenize("你好[SEP]你是谁" )
        self.assertListEqual(tokens , ["你", "好", "[SEP]", "你", "是", "谁"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
        tokenizer = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : int = RoCBertBasicTokenizer(do_lower_case=__a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_UpperCamelCase : Any = {}
for i, token in enumerate(__a ):
_UpperCamelCase : str = i
_UpperCamelCase : Optional[int] = RoCBertWordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
            sentence = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
            tokens = tokenizer_r.encode_plus(
                sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
            do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , "do_lower_case" ) else False
            expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = ["的", "人", "有"]
_UpperCamelCase : int = "".join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : int = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Any = False
_UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Any = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCamelCase : Any = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        text = tokenizer.encode("你好" , add_special_tokens=False )
        text_a = tokenizer.encode("你是谁" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence )
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens )
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens )
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens )
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids , tokens_shape_ids , tokens_proun_ids , add_special_tokens=True )
                input_dict = tokenizer.encode_plus(string_sequence , add_special_tokens=True )
                self.assertEqual(prepared_input_dict , input_dict )
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = "EncodecFeatureExtractor"
SCREAMING_SNAKE_CASE__ :Union[str, Any] = ("T5Tokenizer", "T5TokenizerFast")
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
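# Hedged usage sketch (upstream names this processor MusicgenProcessor; the
# variable names below are illustrative, not from this file):
#   processor = MusicgenProcessor(feature_extractor, tokenizer)
#   inputs = processor(text=["80s pop track"], audio=raw_audio, sampling_rate=32000)
#   audio_list = processor.batch_decode(audio=generated_values, padding_mask=inputs["padding_mask"])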
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = "yolos"
def __init__( self : Dict , __a : Optional[Any]=768 , __a : List[Any]=12 , __a : Any=12 , __a : List[Any]=3072 , __a : Optional[int]="gelu" , __a : Dict=0.0 , __a : Optional[Any]=0.0 , __a : Any=0.02 , __a : Optional[int]=1e-1_2 , __a : List[Any]=[512, 864] , __a : List[str]=16 , __a : str=3 , __a : Optional[Any]=True , __a : Optional[Any]=100 , __a : List[str]=True , __a : Any=False , __a : List[str]=1 , __a : str=5 , __a : Optional[Any]=2 , __a : Tuple=5 , __a : Any=2 , __a : Union[str, Any]=0.1 , **__a : List[str] , ) -> List[str]:
super().__init__(**__a )
_UpperCamelCase : Dict = hidden_size
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : Tuple = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : Any = qkv_bias
_UpperCamelCase : str = num_detection_tokens
_UpperCamelCase : str = use_mid_position_embeddings
_UpperCamelCase : List[str] = auxiliary_loss
# Hungarian matcher
_UpperCamelCase : List[Any] = class_cost
_UpperCamelCase : int = bbox_cost
_UpperCamelCase : Optional[int] = giou_cost
# Loss coefficients
_UpperCamelCase : List[Any] = bbox_loss_coefficient
_UpperCamelCase : str = giou_loss_coefficient
_UpperCamelCase : Dict = eos_coefficient
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> float:
return 1e-4
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return 12
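# Hedged export sketch (upstream names this class YolosOnnxConfig; variable
# names are illustrative): transformers' ONNX exporter consumes exactly the
# input axes, default opset (12) and validation atol (1e-4) declared above.
#   from transformers.onnx import export
#   onnx_config = YolosOnnxConfig(model.config)
#   export(image_processor, model, onnx_config, onnx_config.default_onnx_opset, Path("yolos.onnx"))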
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = "canine"
def __init__( self : Tuple , __a : Dict=768 , __a : Dict=12 , __a : int=12 , __a : str=3072 , __a : str="gelu" , __a : List[str]=0.1 , __a : Optional[Any]=0.1 , __a : int=1_6384 , __a : Dict=16 , __a : Optional[Any]=0.02 , __a : Any=1e-1_2 , __a : Union[str, Any]=0 , __a : Optional[int]=0Xe000 , __a : Union[str, Any]=0Xe001 , __a : str=4 , __a : str=4 , __a : Union[str, Any]=8 , __a : int=1_6384 , __a : Optional[Any]=128 , **__a : Dict , ) -> Optional[int]:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
_UpperCamelCase : List[str] = max_position_embeddings
_UpperCamelCase : int = hidden_size
_UpperCamelCase : List[str] = num_hidden_layers
_UpperCamelCase : int = num_attention_heads
_UpperCamelCase : Tuple = intermediate_size
_UpperCamelCase : Union[str, Any] = hidden_act
_UpperCamelCase : Union[str, Any] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : Any = initializer_range
_UpperCamelCase : Optional[int] = type_vocab_size
_UpperCamelCase : str = layer_norm_eps
# Character config:
_UpperCamelCase : Optional[int] = downsampling_rate
_UpperCamelCase : Optional[int] = upsampling_kernel_size
_UpperCamelCase : List[Any] = num_hash_functions
_UpperCamelCase : List[Any] = num_hash_buckets
_UpperCamelCase : Tuple = local_transformer_stride
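# Hedged usage sketch (upstream exposes this class as CanineConfig; the
# companion CanineModel is defined elsewhere): the defaults above reproduce
# the google/canine-s architecture.
#   configuration = CanineConfig()
#   model = CanineModel(configuration)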
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """XOR-decrypt the ciphertext with the repeating key; return None as soon
    as a decoded byte falls outside the valid character set."""
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every three-letter lowercase key and keep the decodings whose
    characters are all valid."""
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only the candidate plaintexts containing the given common word."""
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    """Project Euler 59: decrypt the cipher file and return the sum of the
    ASCII values of the recovered plaintext."""
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = "biogpt"
def __init__( self : List[Any] , __a : List[Any]=4_2384 , __a : Optional[Any]=1024 , __a : Dict=24 , __a : List[str]=16 , __a : Any=4096 , __a : Tuple="gelu" , __a : List[str]=0.1 , __a : Optional[int]=0.1 , __a : Any=1024 , __a : Dict=0.02 , __a : str=1e-1_2 , __a : Optional[int]=True , __a : Union[str, Any]=True , __a : Optional[int]=0.0 , __a : Dict=0.0 , __a : List[str]=1 , __a : Dict=0 , __a : Any=2 , **__a : int , ) -> Dict:
_UpperCamelCase : Union[str, Any] = vocab_size
_UpperCamelCase : Optional[Any] = max_position_embeddings
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : int = num_hidden_layers
_UpperCamelCase : Optional[Any] = num_attention_heads
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : Tuple = hidden_act
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : Optional[int] = attention_probs_dropout_prob
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : List[Any] = layer_norm_eps
_UpperCamelCase : Tuple = scale_embedding
_UpperCamelCase : List[str] = use_cache
_UpperCamelCase : Any = layerdrop
_UpperCamelCase : Optional[Any] = activation_dropout
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedily select the maximum number of non-overlapping activities."""
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
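# Note (ours): the greedy scan above assumes activities are pre-sorted by
# finish time, as in the driver lists; for that input it prints "0,1,3,4,".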
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story(story_id: int) -> dict:
    """Fetch a single story by id from the Hacker News API."""
    url = F'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Fetch the top `max_stories` posts from Hacker News."""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top stories as a markdown bullet list."""
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
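# Hedged usage sketch (performs live HTTP requests against the public
# Hacker News Firebase API, so it is illustrative only):
#   print(hackernews_top_stories_as_markdown(5))  # five "* [title](url)" lines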
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :torch.FloatTensor
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Dict=3 , __a : Any=3 , __a : Union[str, Any]=("DownEncoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Tuple=32 , __a : int="silu" , __a : str=True , ) -> Dict:
super().__init__()
_UpperCamelCase : List[str] = layers_per_block
        _UpperCamelCase : Dict = torch.nn.Conv2d(
__a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : int = None
_UpperCamelCase : Any = nn.ModuleList([] )
# down
_UpperCamelCase : List[str] = block_out_channels[0]
for i, down_block_type in enumerate(__a ):
_UpperCamelCase : Tuple = output_channel
_UpperCamelCase : int = block_out_channels[i]
_UpperCamelCase : int = i == len(__a ) - 1
_UpperCamelCase : Dict = get_down_block(
__a , num_layers=self.layers_per_block , in_channels=__a , out_channels=__a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , )
self.down_blocks.append(__a )
# mid
        _UpperCamelCase : Union[str, Any] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# out
_UpperCamelCase : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : Any = nn.SiLU()
_UpperCamelCase : Union[str, Any] = 2 * out_channels if double_z else out_channels
        _UpperCamelCase : Tuple = nn.Conv2d(block_out_channels[-1] , __a , 3 , padding=1 )
_UpperCamelCase : Optional[int] = False
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Dict ) -> List[str]:
_UpperCamelCase : int = x
_UpperCamelCase : Optional[int] = self.conv_in(__a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Tuple ):
def custom_forward(*__a : Any ):
return module(*__a )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , use_reentrant=__a )
# middle
_UpperCamelCase : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , use_reentrant=__a )
else:
for down_block in self.down_blocks:
_UpperCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a )
# middle
_UpperCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __a )
else:
# down
for down_block in self.down_blocks:
_UpperCamelCase : int = down_block(__a )
# middle
_UpperCamelCase : int = self.mid_block(__a )
# post-process
_UpperCamelCase : Any = self.conv_norm_out(__a )
_UpperCamelCase : Any = self.conv_act(__a )
_UpperCamelCase : Optional[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : int=3 , __a : Any=3 , __a : str=("UpDecoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Optional[int]=32 , __a : Tuple="silu" , __a : Union[str, Any]="group" , ) -> str:
super().__init__()
_UpperCamelCase : List[Any] = layers_per_block
        _UpperCamelCase : Tuple = nn.Conv2d(
__a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = nn.ModuleList([] )
_UpperCamelCase : List[Any] = in_channels if norm_type == "spatial" else None
# mid
        _UpperCamelCase : Optional[Any] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# up
_UpperCamelCase : List[str] = list(reversed(__a ) )
_UpperCamelCase : int = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__a ):
_UpperCamelCase : int = output_channel
_UpperCamelCase : Union[str, Any] = reversed_block_out_channels[i]
_UpperCamelCase : Optional[Any] = i == len(__a ) - 1
_UpperCamelCase : Union[str, Any] = get_up_block(
__a , num_layers=self.layers_per_block + 1 , in_channels=__a , out_channels=__a , prev_output_channel=__a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , resnet_time_scale_shift=__a , )
self.up_blocks.append(__a )
_UpperCamelCase : Optional[Any] = output_channel
# out
if norm_type == "spatial":
_UpperCamelCase : Optional[int] = SpatialNorm(block_out_channels[0] , __a )
else:
_UpperCamelCase : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : str = nn.SiLU()
        _UpperCamelCase : str = nn.Conv2d(block_out_channels[0] , __a , 3 , padding=1 )
_UpperCamelCase : Dict = False
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[Any] , __a : Union[str, Any]=None ) -> Tuple:
_UpperCamelCase : List[str] = z
_UpperCamelCase : Dict = self.conv_in(__a )
_UpperCamelCase : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Any ):
def custom_forward(*__a : Tuple ):
return module(*__a )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a , use_reentrant=__a )
_UpperCamelCase : Optional[int] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , __a , use_reentrant=__a )
else:
# middle
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a )
_UpperCamelCase : Union[str, Any] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a , __a )
else:
# middle
_UpperCamelCase : str = self.mid_block(__a , __a )
_UpperCamelCase : int = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : Any = up_block(__a , __a )
# post-process
if latent_embeds is None:
_UpperCamelCase : List[str] = self.conv_norm_out(__a )
else:
_UpperCamelCase : Optional[int] = self.conv_norm_out(__a , __a )
_UpperCamelCase : Tuple = self.conv_act(__a )
_UpperCamelCase : List[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple , __a : List[str] , __a : List[str] , __a : str=None , __a : Optional[int]="random" , __a : Any=False , __a : Optional[Any]=True ) -> List[Any]:
super().__init__()
_UpperCamelCase : Tuple = n_e
_UpperCamelCase : Tuple = vq_embed_dim
_UpperCamelCase : Union[str, Any] = beta
_UpperCamelCase : str = legacy
_UpperCamelCase : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_UpperCamelCase : Any = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
_UpperCamelCase : Dict = self.used.shape[0]
_UpperCamelCase : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_UpperCamelCase : Optional[int] = self.re_embed
_UpperCamelCase : Any = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
_UpperCamelCase : Union[str, Any] = n_e
_UpperCamelCase : List[str] = sane_index_shape
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : str = inds.shape
assert len(__a ) > 1
_UpperCamelCase : Union[str, Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[Any] = self.used.to(__a )
_UpperCamelCase : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
_UpperCamelCase : Optional[Any] = match.argmax(-1 )
_UpperCamelCase : Any = match.sum(2 ) < 1
if self.unknown_index == "random":
_UpperCamelCase : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_UpperCamelCase : Dict = self.unknown_index
return new.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[int] ) -> Optional[int]:
_UpperCamelCase : int = inds.shape
assert len(__a ) > 1
_UpperCamelCase : List[Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[int] = self.used.to(__a )
if self.re_embed > self.used.shape[0]: # extra token
_UpperCamelCase : int = 0 # simply set to zero
_UpperCamelCase : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __a )
return back.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : str ) -> Optional[int]:
# reshape z -> (batch, height, width, channel) and flatten
_UpperCamelCase : List[str] = z.permute(0 , 2 , 3 , 1 ).contiguous()
_UpperCamelCase : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_UpperCamelCase : Optional[int] = torch.argmin(torch.cdist(__a , self.embedding.weight ) , dim=1 )
_UpperCamelCase : int = self.embedding(__a ).view(z.shape )
_UpperCamelCase : str = None
_UpperCamelCase : Any = None
# compute loss for embedding
if not self.legacy:
_UpperCamelCase : List[str] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_UpperCamelCase : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_UpperCamelCase : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
_UpperCamelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_UpperCamelCase : Tuple = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_UpperCamelCase : Dict = self.remap_to_used(__a )
_UpperCamelCase : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_UpperCamelCase : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[str] , __a : str ) -> Any:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_UpperCamelCase : str = indices.reshape(shape[0] , -1 ) # add batch axis
_UpperCamelCase : str = self.unmap_to_all(__a )
_UpperCamelCase : int = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_UpperCamelCase : Optional[int] = self.embedding(__a )
if shape is not None:
_UpperCamelCase : Tuple = z_q.view(__a )
# reshape back to match original input shape
_UpperCamelCase : Tuple = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : List[str] , __a : Optional[Any]=False ) -> int:
_UpperCamelCase : Dict = parameters
_UpperCamelCase, _UpperCamelCase : str = torch.chunk(__a , 2 , dim=1 )
_UpperCamelCase : Tuple = torch.clamp(self.logvar , -30.0 , 20.0 )
_UpperCamelCase : Union[str, Any] = deterministic
_UpperCamelCase : Dict = torch.exp(0.5 * self.logvar )
_UpperCamelCase : Any = torch.exp(self.logvar )
if self.deterministic:
_UpperCamelCase : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
_UpperCamelCase : List[Any] = randn_tensor(
self.mean.shape , generator=__a , device=self.parameters.device , dtype=self.parameters.dtype )
_UpperCamelCase : List[Any] = self.mean + self.std * sample
return x
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[str]=None ) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Tuple , __a : List[str]=[1, 2, 3] ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
_UpperCamelCase : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
return self.mean
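# Hedged sketch of the reparameterization implemented by `sample()` above
# (shapes illustrative; upstream names this class DiagonalGaussianDistribution
# and the methods sample()/kl()):
#   params = torch.randn(1, 8, 16, 16)     # mean and logvar concatenated on dim=1
#   dist = DiagonalGaussianDistribution(params)
#   latent = dist.sample()                 # mean + std * eps, with eps ~ N(0, I)
#   kl = dist.kl()                         # KL(q || N(0, I)) per batch element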
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
lowerCamelCase__ = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each prompt `n_copies` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks whether all generated functions in the batch are complete."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last, possibly truncated block delimited by one of EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate `batch_size` completions per task and regroup them by task id."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    """simple docstring"""
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = F'''check({human_eval['test'][task]['entry_point']})'''
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(F'''Results: {pass_at_k}''')

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
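# Hedged invocation sketch (flag names mirror the HumanEvalArguments fields
# referenced above; arguments.py itself is not shown here):
#   accelerate launch human_eval.py --model_ckpt <checkpoint> --do_sample True \
#       --temperature 0.2 --top_p 0.95 --n_samples 200 --batch_size 10 \
#       --HF_ALLOW_CODE_EVAL 1 --output_file eval_results.json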
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"text": Value("string" )} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"summary": Value("string" )} )
SCREAMING_SNAKE_CASE__ :str = "text"
SCREAMING_SNAKE_CASE__ :str = "summary"
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
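# Hedged usage sketch (upstream exposes this template as Summarization): the
# `column_mapping` property above drives dataset column renaming.
#   task = Summarization(text_column="article", summary_column="highlights")
#   task.column_mapping  # {"article": "text", "highlights": "summary"}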
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Dict , *__a : Optional[int] , **__a : Dict ) -> None:
warnings.warn(
"The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use FlavaImageProcessor instead." , __a , )
super().__init__(*__a , **__a )
"""simple docstring"""
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover via a maximal matching."""
    chosen_vertices = set()
    # edges = set of graph's edges
    edges = get_edges(graph)
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove all edges adjacent to from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Collect the directed edge pairs of the adjacency-list graph."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> list[list[int]]:
"""simple docstring"""
_UpperCamelCase : List[str] = []
if len(lowercase_ ) == 1:
return [nums.copy()]
for _ in range(len(lowercase_ ) ):
_UpperCamelCase : Dict = nums.pop(0 )
_UpperCamelCase : Any = permute(lowercase_ )
for perm in permutations:
perm.append(lowercase_ )
result.extend(lowercase_ )
nums.append(lowercase_ )
return result
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
def backtrack(lowercase_ ):
if start == len(lowercase_ ) - 1:
output.append(nums[:] )
else:
for i in range(lowercase_ ,len(lowercase_ ) ):
_UpperCamelCase, _UpperCamelCase : Tuple = nums[i], nums[start]
backtrack(start + 1 )
_UpperCamelCase, _UpperCamelCase : Any = nums[i], nums[start] # backtrack
_UpperCamelCase : str = []
backtrack(0 )
return output
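# Both functions above enumerate all n! orderings of the input: for [1, 2, 3]
# each returns six permutations, the first by popping each element and appending
# it to the permutations of the rest, the second by in-place swap backtracking.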
if __name__ == "__main__":
import doctest
    # use res to print the output of the permute2 function
lowerCamelCase__ = permutea([1, 2, 3])
print(res)
doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["OwlViTFeatureExtractor"]
lowerCamelCase__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
if not isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : List[str] = F'''Input value of [number={number}] must be an integer'''
raise TypeError(lowercase_ )
if number < 1:
_UpperCamelCase : List[Any] = F'''Input value of [number={number}] must be > 0'''
raise ValueError(lowercase_ )
_UpperCamelCase : Tuple = 1
for i in range(1 ,lowercase_ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
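# The loop above applies the Catalan-number recurrence
#   C(i) = C(i - 1) * (4 * i - 2) // (i + 1), with C(0) = 1,
# so successive return values are 1, 1, 2, 5, 14, 42, ...; e.g. an input of 5
# walks C(1) through C(4) and returns 14.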
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowercase__ ( lowercase_ = 1_000_000 ,lowercase_ = 10 ) -> int:
"""simple docstring"""
_UpperCamelCase : defaultdict = defaultdict(lowercase_ )
for outer_width in range(3 ,(t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_UpperCamelCase : Any = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
else:
_UpperCamelCase : str = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(lowercase_ ,outer_width - 1 ,2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
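# A square lamina with side outer_width and a square hole of side hole_width
# uses t = outer_width**2 - hole_width**2 tiles (e.g. a 3x3 ring around a 1x1
# hole uses 8 tiles); the function counts how many tile totals t <= t_limit can
# be formed in between 1 and 10 distinct ways.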
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = LEDConfig
SCREAMING_SNAKE_CASE__ :str = {}
SCREAMING_SNAKE_CASE__ :List[str] = "gelu"
def __init__( self : List[Any] , __a : Union[str, Any] , __a : List[Any]=13 , __a : int=7 , __a : str=True , __a : Any=False , __a : str=99 , __a : str=32 , __a : Union[str, Any]=2 , __a : Optional[Any]=4 , __a : List[Any]=37 , __a : List[Any]=0.1 , __a : Tuple=0.1 , __a : Dict=20 , __a : str=2 , __a : Dict=1 , __a : Any=0 , __a : List[Any]=4 , ) -> List[Any]:
_UpperCamelCase : Optional[Any] = parent
_UpperCamelCase : List[str] = batch_size
_UpperCamelCase : str = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : int = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : str = max_position_embeddings
_UpperCamelCase : int = eos_token_id
_UpperCamelCase : Dict = pad_token_id
_UpperCamelCase : Optional[Any] = bos_token_id
_UpperCamelCase : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_UpperCamelCase : List[str] = self.attention_window + 2
        # because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_UpperCamelCase : int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCamelCase : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCamelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_UpperCamelCase : Dict = prepare_led_inputs_dict(__a , __a , __a )
_UpperCamelCase : Union[str, Any] = tf.concat(
[tf.zeros_like(__a )[:, :-1], tf.ones_like(__a )[:, -1:]] , axis=-1 , )
_UpperCamelCase : Union[str, Any] = global_attention_mask
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : int ) -> Tuple:
_UpperCamelCase : Tuple = TFLEDModel(config=__a ).get_decoder()
_UpperCamelCase : Tuple = inputs_dict["input_ids"]
_UpperCamelCase : int = input_ids[:1, :]
_UpperCamelCase : List[str] = inputs_dict["attention_mask"][:1, :]
_UpperCamelCase : List[Any] = 1
# first forward pass
_UpperCamelCase : Any = model(__a , attention_mask=__a , use_cache=__a )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
_UpperCamelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCamelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
_UpperCamelCase : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCamelCase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCamelCase : Tuple = model(__a , attention_mask=__a )[0]
_UpperCamelCase : int = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCamelCase : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=None ,lowercase_=None ,) -> Dict:
"""simple docstring"""
if attention_mask is None:
_UpperCamelCase : str = tf.cast(tf.math.not_equal(lowercase_ ,config.pad_token_id ) ,tf.inta )
if decoder_attention_mask is None:
_UpperCamelCase : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ),
] ,axis=-1 ,)
if head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Tuple = True
SCREAMING_SNAKE_CASE__ :str = False
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
_UpperCamelCase : int = TFLEDModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase, _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[int] = tf.zeros_like(inputs_dict["attention_mask"] )
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : str = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_UpperCamelCase : Dict = True
_UpperCamelCase : str = self.model_tester.seq_length
_UpperCamelCase : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__a : Optional[int] ):
_UpperCamelCase : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__a : Optional[Any] ):
_UpperCamelCase : Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions]
_UpperCamelCase : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = True
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Any = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
_UpperCamelCase : Optional[Any] = model_class(__a )
_UpperCamelCase : List[Any] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCamelCase : int = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : str = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
_UpperCamelCase : Any = True
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
        # TODO: Head-masking not yet implemented
pass
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
return tf.constant(lowercase_ ,dtype=tf.intaa )
lowerCamelCase__ = 1E-4
@slow
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Any = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_UpperCamelCase : int = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Tuple = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Optional[int] = model(**__a )[0]
_UpperCamelCase : Optional[int] = (1, 1024, 768)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Tuple = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_UpperCamelCase : Optional[int] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : List[str] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Union[str, Any] = model(**__a )[0]
_UpperCamelCase : int = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Optional[int] = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 , rtol=1e-3 )
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("KEY")
lowerCamelCase__ = TypeVar("VAL")
@dataclass(frozen=_UpperCamelCase , slots=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( Generic[KEY, VAL] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :KEY
SCREAMING_SNAKE_CASE__ :VAL
class __SCREAMING_SNAKE_CASE ( _Item ):
'''simple docstring'''
def __init__( self : List[str] ) -> None:
super().__init__(__a , __a )
def __bool__( self : Dict ) -> bool:
return False
lowerCamelCase__ = _DeletedItem()
class __SCREAMING_SNAKE_CASE ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self : int , __a : int = 8 , __a : float = 0.75 ) -> None:
_UpperCamelCase : str = initial_block_size
_UpperCamelCase : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_UpperCamelCase : List[str] = capacity_factor
_UpperCamelCase : Dict = 0
def __SCREAMING_SNAKE_CASE ( self : int , __a : KEY ) -> int:
return hash(__a ) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : int ) -> int:
return (ind + 1) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : int , __a : KEY , __a : VAL ) -> bool:
_UpperCamelCase : List[Any] = self._buckets[ind]
if not stored:
_UpperCamelCase : Tuple = _Item(__a , __a )
self._len += 1
return True
elif stored.key == key:
_UpperCamelCase : Union[str, Any] = _Item(__a , __a )
return True
else:
return False
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
_UpperCamelCase : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
_UpperCamelCase : List[str] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : int ) -> None:
_UpperCamelCase : Any = self._buckets
_UpperCamelCase : List[Any] = [None] * new_size
_UpperCamelCase : List[str] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __SCREAMING_SNAKE_CASE ( self : int ) -> None:
self._resize(len(self._buckets ) * 2 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None:
self._resize(len(self._buckets ) // 2 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : KEY ) -> Iterator[int]:
_UpperCamelCase : str = self._get_bucket_index(__a )
for _ in range(len(self._buckets ) ):
yield ind
_UpperCamelCase : Tuple = self._get_next_ind(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : KEY , __a : VAL ) -> None:
for ind in self._iterate_buckets(__a ):
if self._try_set(__a , __a , __a ):
break
def __setitem__( self : int , __a : KEY , __a : VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(__a , __a )
def __delitem__( self : str , __a : KEY ) -> None:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
raise KeyError(__a )
if item is _deleted:
continue
if item.key == key:
_UpperCamelCase : List[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , __a : KEY ) -> VAL:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__a )
def __len__( self : List[Any] ) -> int:
return self._len
def __iter__( self : List[str] ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self : List[str] ) -> str:
_UpperCamelCase : Optional[int] = " ,".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
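# A minimal usage sketch of the open-addressing map above, taking the original
# class name to be HashMap as its __repr__ suggests:
#   hm = HashMap()
#   hm["a"] = 1
#   hm["b"] = 2
#   len(hm)      # -> 2
#   del hm["a"]
#   "a" in hm    # -> False (MutableMapping derives __contains__ from __getitem__)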
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , __a : Any , __a : Dict=13 , __a : Optional[Any]=32 , __a : Union[str, Any]=2 , __a : Optional[int]=3 , __a : List[Any]=16 , __a : List[Any]=[1, 2, 1] , __a : List[Any]=[2, 2, 4] , __a : Any=2 , __a : Optional[Any]=2.0 , __a : Optional[Any]=True , __a : Union[str, Any]=0.0 , __a : int=0.0 , __a : Dict=0.1 , __a : str="gelu" , __a : int=False , __a : Any=True , __a : int=0.02 , __a : Optional[int]=1e-5 , __a : List[str]=True , __a : List[str]=None , __a : Tuple=True , __a : Tuple=10 , __a : Tuple=8 , ) -> Dict:
_UpperCamelCase : Union[str, Any] = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : List[str] = image_size
_UpperCamelCase : str = patch_size
_UpperCamelCase : List[Any] = num_channels
_UpperCamelCase : List[str] = embed_dim
_UpperCamelCase : str = depths
_UpperCamelCase : Optional[int] = num_heads
_UpperCamelCase : List[Any] = window_size
_UpperCamelCase : List[Any] = mlp_ratio
_UpperCamelCase : Optional[Any] = qkv_bias
_UpperCamelCase : Any = hidden_dropout_prob
_UpperCamelCase : List[str] = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = drop_path_rate
_UpperCamelCase : Optional[int] = hidden_act
_UpperCamelCase : Optional[Any] = use_absolute_embeddings
_UpperCamelCase : Tuple = patch_norm
_UpperCamelCase : List[Any] = layer_norm_eps
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : List[str] = is_training
_UpperCamelCase : Any = scope
_UpperCamelCase : List[Any] = use_labels
_UpperCamelCase : str = type_sequence_label_size
_UpperCamelCase : List[str] = encoder_stride
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
_UpperCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : Optional[int] = None
if self.use_labels:
_UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : int = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Dict ) -> Optional[Any]:
_UpperCamelCase : List[Any] = SwinvaModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[Any] = model(__a )
_UpperCamelCase : int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_UpperCamelCase : int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Dict , __a : Union[str, Any] , __a : Union[str, Any] ) -> List[Any]:
_UpperCamelCase : Tuple = SwinvaForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[int] = model(__a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCamelCase : List[Any] = 1
_UpperCamelCase : Tuple = SwinvaForMaskedImageModeling(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase : int = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , __a : Optional[int] , __a : str ) -> List[Any]:
_UpperCamelCase : int = self.type_sequence_label_size
_UpperCamelCase : Any = SwinvaForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
_UpperCamelCase : Dict = self.prepare_config_and_inputs()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Any = config_and_inputs
_UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE__ :List[Any] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = False
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :Dict = False
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
_UpperCamelCase : List[str] = SwinvaModelTester(self )
_UpperCamelCase : List[Any] = ConfigTester(self , config_class=__a , embed_dim=37 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCamelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : str = model_class(__a )
_UpperCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : str = [*signature.parameters.keys()]
_UpperCamelCase : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
_UpperCamelCase, _UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : int = True
for model_class in self.all_model_classes:
_UpperCamelCase : Any = True
_UpperCamelCase : Tuple = False
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : Dict = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_UpperCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Optional[Any] = outputs.attentions
_UpperCamelCase : str = len(self.model_tester.depths )
self.assertEqual(len(__a ) , __a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : str = config.window_size**2
_UpperCamelCase : List[str] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_UpperCamelCase : Optional[Any] = model(**self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Union[str, Any] = outputs.attentions
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
_UpperCamelCase : Any = len(__a )
# Check attention is always last and order is fine
_UpperCamelCase : Dict = True
_UpperCamelCase : List[str] = True
_UpperCamelCase : int = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_UpperCamelCase : str = model(**self._prepare_for_class(__a , __a ) )
if hasattr(self.model_tester , "num_hidden_states_types" ):
_UpperCamelCase : Optional[Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_UpperCamelCase : Optional[int] = 2
self.assertEqual(out_len + added_hidden_states , len(__a ) )
_UpperCamelCase : Optional[Any] = outputs.attentions
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : str , __a : List[str] , __a : int , __a : str ) -> int:
_UpperCamelCase : Optional[int] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_UpperCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Tuple = outputs.hidden_states
_UpperCamelCase : Optional[Any] = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# Swinv2 has a different seq_length
_UpperCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCamelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_UpperCamelCase : List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[str] = reshaped_hidden_states[0].shape
_UpperCamelCase : Dict = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCamelCase : Union[str, Any] = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : Tuple = True
self.check_hidden_states_output(__a , __a , __a , __a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase, _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : List[Any] = 3
_UpperCamelCase : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCamelCase : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : Optional[Any] = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Tuple = SwinvaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
_UpperCamelCase, _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : List[str] = _config_zero_init(__a )
for model_class in self.all_model_classes:
_UpperCamelCase : Any = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return (
AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" )
if is_vision_available()
else None
)
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
_UpperCamelCase : Optional[Any] = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to(
__a )
_UpperCamelCase : Optional[int] = self.default_image_processor
_UpperCamelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_UpperCamelCase : Any = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : Any = model(**__a )
# verify the logits
_UpperCamelCase : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
_UpperCamelCase : Tuple = torch.tensor([-0.39_47, -0.43_06, 0.00_26] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
| 51
|
"""simple docstring"""
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any] , __a : list[int] ) -> None:
_UpperCamelCase : Tuple = len(__a )
_UpperCamelCase : Dict = [0] * len_array
if len_array > 0:
_UpperCamelCase : Optional[Any] = array[0]
for i in range(1 , __a ):
_UpperCamelCase : Tuple = self.prefix_sum[i - 1] + array[i]
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : int , __a : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int ) -> bool:
_UpperCamelCase : int = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__a )
return False
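# A minimal usage sketch, assuming the original class and method names were
# PrefixSum, get_sum, and contains_sum:
#   ps = PrefixSum([1, 2, 3])   # prefix sums [1, 3, 6]
#   ps.get_sum(0, 2)            # -> 6
#   ps.contains_sum(5)          # -> True, since the subarray [2, 3] sums to 5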
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( lowercase_ ) -> list[int]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : List[str] = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(lowercase_ )
if n > 1:
factors.append(lowercase_ )
return factors
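# Trial-division example: 360 = 2**3 * 3**2 * 5, so an input of 360 yields
# [2, 2, 2, 3, 3, 5] (factors appear with multiplicity, in ascending order).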
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def lowercase__ ( lowercase_ ,lowercase_=7 ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[int] = None
if token is not None:
_UpperCamelCase : Optional[Any] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
_UpperCamelCase : Any = "636036"
_UpperCamelCase : Tuple = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
_UpperCamelCase : Dict = requests.get(lowercase_ ,headers=lowercase_ ).json()
return result["workflow_runs"]
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_daily_ci_runs(lowercase_ )
_UpperCamelCase : Tuple = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
_UpperCamelCase : Union[str, Any] = workflow_run["id"]
break
return workflow_run_id
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : str = get_last_daily_ci_runs(lowercase_ )
if workflow_run_id is not None:
_UpperCamelCase : int = get_artifacts_links(worflow_run_id=lowercase_ ,token=lowercase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
_UpperCamelCase : Dict = artifacts_links[artifact_name]
download_artifact(
artifact_name=lowercase_ ,artifact_url=lowercase_ ,output_dir=lowercase_ ,token=lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
get_last_daily_ci_artifacts(lowercase_ ,lowercase_ ,lowercase_ )
_UpperCamelCase : Dict = {}
for artifact_name in artifact_names:
_UpperCamelCase : Union[str, Any] = os.path.join(lowercase_ ,F'''{artifact_name}.zip''' )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : int = {}
with zipfile.ZipFile(lowercase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase_ ):
# read the file
with z.open(lowercase_ ) as f:
_UpperCamelCase : int = f.read().decode("UTF-8" )
return results
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 100 ,) -> float:
"""simple docstring"""
_UpperCamelCase : List[Any] = x_start
_UpperCamelCase : Union[str, Any] = fnc(lowercase_ )
_UpperCamelCase : Optional[Any] = 0.0
for _ in range(lowercase_ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
_UpperCamelCase : int = (x_end - x_start) / steps + xa
_UpperCamelCase : List[str] = fnc(lowercase_ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
_UpperCamelCase : Optional[Any] = xa
_UpperCamelCase : Tuple = fxa
return area
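# The loop above is the composite trapezoidal rule: each of the `steps` segments
# [x_i, x_(i+1)] contributes (f(x_i) + f(x_(i+1))) * (x_(i+1) - x_i) / 2 to the area.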
if __name__ == "__main__":
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
return x**3 + x**2
print("f(x) = x^3 + x^2")
print("The area between the curve, x = -5, x = 5 and the x axis is:")
lowerCamelCase__ = 10
while i <= 10_0000:
print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
"""simple docstring"""
import math
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Any , __a : list[list[float]] , __a : list[int] ) -> int:
_UpperCamelCase : List[Any] = 0.0
_UpperCamelCase : Union[str, Any] = 0.0
for i in range(len(__a ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : list[list[int | float]] , __a : list[int] , __a : int , __a : float ) -> list[list[int | float]]:
for i in range(len(__a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
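# The update above is the standard competitive-learning rule for the winning
# unit j: w_j <- w_j + alpha * (sample - w_j), pulling the winner toward the sample.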
def lowercase__ ( ) -> None:
"""simple docstring"""
_UpperCamelCase : Optional[int] = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_UpperCamelCase : List[str] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_UpperCamelCase : List[Any] = SelfOrganizingMap()
_UpperCamelCase : int = 3
_UpperCamelCase : List[Any] = 0.5
for _ in range(lowercase_ ):
for j in range(len(lowercase_ ) ):
# training sample
_UpperCamelCase : int = training_samples[j]
# Compute the winning vector
_UpperCamelCase : Tuple = self_organizing_map.get_winner(lowercase_ ,lowercase_ )
# Update the winning vector
_UpperCamelCase : int = self_organizing_map.update(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
# classify test sample
_UpperCamelCase : Optional[int] = [0, 0, 0, 1]
_UpperCamelCase : Dict = self_organizing_map.get_winner(lowercase_ ,lowercase_ )
# results
print(F'''Clusters that the test sample belongs to : {winner}''' )
print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Dict = "megatron-bert"
def __init__( self : int , __a : List[str]=2_9056 , __a : Dict=1024 , __a : Optional[Any]=24 , __a : str=16 , __a : List[Any]=4096 , __a : str="gelu" , __a : Optional[Any]=0.1 , __a : Any=0.1 , __a : Optional[int]=512 , __a : int=2 , __a : Tuple=0.02 , __a : str=1e-1_2 , __a : Optional[int]=0 , __a : Optional[Any]="absolute" , __a : Any=True , **__a : List[Any] , ) -> Tuple:
super().__init__(pad_token_id=__a , **__a )
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : int = hidden_size
_UpperCamelCase : int = num_hidden_layers
_UpperCamelCase : List[Any] = num_attention_heads
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : Union[str, Any] = intermediate_size
_UpperCamelCase : List[Any] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : Optional[Any] = type_vocab_size
_UpperCamelCase : Dict = initializer_range
_UpperCamelCase : Union[str, Any] = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Optional[int] = use_cache
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase__ = "src/transformers"
lowerCamelCase__ = "docs/source/en"
lowerCamelCase__ = "."
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
with open(lowercase_ ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
_UpperCamelCase : Union[str, Any] = f.readlines()
# Find the start prompt.
_UpperCamelCase : Dict = 0
while not lines[start_index].startswith(lowercase_ ):
start_index += 1
start_index += 1
_UpperCamelCase : Optional[int] = start_index
while not lines[end_index].startswith(lowercase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
lowerCamelCase__ = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase__ = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
lowerCamelCase__ = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Tuple = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" ,lowercase_ )
return [m.group(0 ) for m in matches]
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = 2 if text == "✅" or text == "❌" else len(lowercase_ )
_UpperCamelCase : Union[str, Any] = (width - text_length) // 2
_UpperCamelCase : Dict = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_UpperCamelCase : str = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_UpperCamelCase : Dict = {name: config.replace("Config" ,"" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : str = collections.defaultdict(lowercase_ )
# Let's lookup through all transformers object (once).
for attr_name in dir(lowercase_ ):
_UpperCamelCase : List[str] = None
if attr_name.endswith("Tokenizer" ):
_UpperCamelCase : Tuple = slow_tokenizers
_UpperCamelCase : Any = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
_UpperCamelCase : Optional[Any] = fast_tokenizers
_UpperCamelCase : List[str] = attr_name[:-13]
elif _re_tf_models.match(lowercase_ ) is not None:
_UpperCamelCase : List[Any] = tf_models
_UpperCamelCase : Dict = _re_tf_models.match(lowercase_ ).groups()[0]
elif _re_flax_models.match(lowercase_ ) is not None:
_UpperCamelCase : Dict = flax_models
_UpperCamelCase : Union[str, Any] = _re_flax_models.match(lowercase_ ).groups()[0]
elif _re_pt_models.match(lowercase_ ) is not None:
_UpperCamelCase : Optional[int] = pt_models
_UpperCamelCase : Any = _re_pt_models.match(lowercase_ ).groups()[0]
if lookup_dict is not None:
while len(lowercase_ ) > 0:
if attr_name in model_name_to_prefix.values():
_UpperCamelCase : Dict = True
break
# Try again after removing the last word in the name
_UpperCamelCase : List[str] = "".join(camel_case_split(lowercase_ )[:-1] )
# Let's build that table!
_UpperCamelCase : Any = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_UpperCamelCase : List[str] = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_UpperCamelCase : Union[str, Any] = [len(lowercase_ ) + 2 for c in columns]
_UpperCamelCase : Any = max([len(lowercase_ ) for name in model_names] ) + 2
# Build the table per se
_UpperCamelCase : Tuple = "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for c, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
_UpperCamelCase : Union[str, Any] = {True: "✅", False: "❌"}
for name in model_names:
_UpperCamelCase : Optional[int] = model_name_to_prefix[name]
_UpperCamelCase : Tuple = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for l, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
return table
def lowercase__ ( lowercase_=False ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : str = _find_text_in_file(
filename=os.path.join(lowercase_ ,"index.md" ) ,start_prompt="<!--This table is updated automatically from the auto modules" ,end_prompt="<!-- End table-->" ,)
_UpperCamelCase : Any = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowercase_ ,"index.md" ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCamelCase__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ = get_tests_dir("fixtures/test_sentencepiece.model")
lowerCamelCase__ = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
lowerCamelCase__ = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = CamembertTokenizer
SCREAMING_SNAKE_CASE__ :List[Any] = CamembertTokenizerFast
SCREAMING_SNAKE_CASE__ :Optional[Any] = True
SCREAMING_SNAKE_CASE__ :int = True
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase : Optional[int] = CamembertTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Any = "<pad>"
_UpperCamelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
_UpperCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>NOTUSED" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(__a ) , 1004 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
_UpperCamelCase : Tuple = CamembertTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
_UpperCamelCase : int = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
_UpperCamelCase : str = "I was born in 92000, and this is falsé."
_UpperCamelCase : Tuple = tokenizer.encode(__a )
_UpperCamelCase : Any = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Union[str, Any] = tokenizer.encode(__a , add_special_tokens=__a )
_UpperCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
        # <unk> tokens are not the same for `rust` as for `slow`.
        # Because spm gives back the raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
_UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a )
_UpperCamelCase : Tuple = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
if not self.test_rust_tokenizer:
return
_UpperCamelCase : Union[str, Any] = self.get_tokenizer()
_UpperCamelCase : int = self.get_rust_tokenizer()
_UpperCamelCase : Optional[int] = "I was born in 92000, and this is falsé."
_UpperCamelCase : Dict = tokenizer.tokenize(__a )
_UpperCamelCase : Optional[Any] = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Optional[Any] = tokenizer.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Any = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Optional[Any] = self.get_rust_tokenizer()
_UpperCamelCase : Any = tokenizer.encode(__a )
_UpperCamelCase : str = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
# fmt: off
_UpperCamelCase : Union[str, Any] = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # CamemBERT is a French model, so we also use French texts.
_UpperCamelCase : List[Any] = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=__a , )
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase__ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, float]:
"""simple docstring"""
_UpperCamelCase : str = len([g for position, g in enumerate(lowercase_ ) if g == main_target[position]] )
return (item, float(lowercase_ ))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, str]:
"""simple docstring"""
_UpperCamelCase : Tuple = random.randint(0 ,len(lowercase_ ) - 1 )
_UpperCamelCase : Dict = parent_a[:random_slice] + parent_a[random_slice:]
_UpperCamelCase : Tuple = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
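# Single-point crossover: writing the two arguments as parent_1 and parent_2,
# a slice index k is drawn and the children are parent_1[:k] + parent_2[k:]
# and parent_2[:k] + parent_1[k:].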
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : int = list(lowercase_ )
if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
_UpperCamelCase : int = random.choice(lowercase_ )
return "".join(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,) -> list[str]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = []
# Generate more children proportionally to the fitness score.
_UpperCamelCase : List[str] = int(parent_a[1] * 100 ) + 1
_UpperCamelCase : Union[str, Any] = 10 if child_n >= 10 else child_n
for _ in range(lowercase_ ):
_UpperCamelCase : Dict = population_score[random.randint(0 ,lowercase_ )][0]
_UpperCamelCase, _UpperCamelCase : Dict = crossover(parent_a[0] ,lowercase_ )
# Append new string to the population list.
pop.append(mutate(lowercase_ ,lowercase_ ) )
pop.append(mutate(lowercase_ ,lowercase_ ) )
return pop
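# Illustrative note (assumed values): a parent with a normalized score of 0.07
# yields child_n = int(0.07 * 100) + 1 = 8 crossovers, i.e. 16 mutated children
# appended to the pool (the count is capped at 10 crossovers per parent).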
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = True ) -> tuple[int, int, str]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
_UpperCamelCase : List[str] = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(lowercase_ )
# Verify that the target contains no genes besides the ones in the genes variable.
_UpperCamelCase : int = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_UpperCamelCase : int = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(lowercase_ )
# Generate random starting population.
_UpperCamelCase : Union[str, Any] = []
for _ in range(lowercase_ ):
population.append("".join([random.choice(lowercase_ ) for i in range(len(lowercase_ ) )] ) )
# Just some logs to know what the algorithm is doing.
_UpperCamelCase, _UpperCamelCase : str = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowercase_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_UpperCamelCase : int = [evaluate(lowercase_ ,lowercase_ ) for item in population]
# Check if there is a matching evolution.
_UpperCamelCase : Optional[Any] = sorted(lowercase_ ,key=lambda lowercase_ : x[1] ,reverse=lowercase_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping them avoids regression in the evolution.
_UpperCamelCase : str = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowercase_ )
# Normalize population score to be between 0 and 1.
_UpperCamelCase : str = [
(item, score / len(lowercase_ )) for item, score in population_score
]
# This is the selection step.
for i in range(lowercase_ ):
population.extend(select(population_score[int(lowercase_ )] ,lowercase_ ,lowercase_ ) )
# Check if the population has already reached its maximum size and, if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also compute small strings in
# far fewer generations.
if len(lowercase_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 51
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowercase__ ( lowercase_ ,lowercase_=False ) -> Any:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if converting just the base model, we should remove "vit" from all keys that start with "vit"
_UpperCamelCase : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=False ) -> Any:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCamelCase : Union[str, Any] = ""
else:
_UpperCamelCase : Union[str, Any] = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCamelCase : Any = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
_UpperCamelCase : List[str] = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase : List[Any] = in_proj_weight[
: config.hidden_size, :
]
_UpperCamelCase : Tuple = in_proj_bias[: config.hidden_size]
_UpperCamelCase : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCamelCase : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCamelCase : Dict = in_proj_weight[
-config.hidden_size :, :
]
_UpperCamelCase : Any = in_proj_bias[-config.hidden_size :]
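# Illustrative note on the slicing above: timm stores the fused projection as
# a single (3 * hidden_size, hidden_size) matrix, so rows [:h], [h:2h] and
# [2h:3h] (with h = hidden_size) become the query, key and value weights
# respectively.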
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Optional[int] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(lowercase_ ,lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : Dict = dct.pop(lowercase_ )
_UpperCamelCase : List[str] = val
def lowercase__ ( ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCamelCase : Any = Image.open(requests.get(lowercase_ ,stream=lowercase_ ).raw )
return im
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=True ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = ViTConfig()
# patch_size
if model_name[-1] == "8":
_UpperCamelCase : List[Any] = 8
# set labels if required
if not base_model:
_UpperCamelCase : Tuple = 1_000
_UpperCamelCase : Union[str, Any] = "huggingface/label-files"
_UpperCamelCase : Optional[Any] = "imagenet-1k-id2label.json"
_UpperCamelCase : int = json.load(open(hf_hub_download(lowercase_ ,lowercase_ ,repo_type="dataset" ) ,"r" ) )
_UpperCamelCase : int = {int(lowercase_ ): v for k, v in idalabel.items()}
_UpperCamelCase : int = idalabel
_UpperCamelCase : List[str] = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_UpperCamelCase : Dict = 384
_UpperCamelCase : Any = 1_536
_UpperCamelCase : List[Any] = 12
_UpperCamelCase : Union[str, Any] = 6
# load original model from torch hub
_UpperCamelCase : List[str] = torch.hub.load("facebookresearch/dino:main" ,lowercase_ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCamelCase : int = original_model.state_dict()
if base_model:
remove_classification_head_(lowercase_ )
_UpperCamelCase : Optional[Any] = create_rename_keys(lowercase_ ,base_model=lowercase_ )
for src, dest in rename_keys:
rename_key(lowercase_ ,lowercase_ ,lowercase_ )
read_in_q_k_v(lowercase_ ,lowercase_ ,lowercase_ )
# load HuggingFace model
if base_model:
_UpperCamelCase : Dict = ViTModel(lowercase_ ,add_pooling_layer=lowercase_ ).eval()
else:
_UpperCamelCase : List[str] = ViTForImageClassification(lowercase_ ).eval()
model.load_state_dict(lowercase_ )
# Check outputs on an image, prepared by ViTImageProcessor
_UpperCamelCase : Tuple = ViTImageProcessor()
_UpperCamelCase : str = image_processor(images=prepare_img() ,return_tensors="pt" )
_UpperCamelCase : str = encoding["pixel_values"]
_UpperCamelCase : Union[str, Any] = model(lowercase_ )
if base_model:
_UpperCamelCase : Union[str, Any] = original_model(lowercase_ )
assert torch.allclose(lowercase_ ,outputs.last_hidden_state[:, 0, :] ,atol=1e-1 )
else:
_UpperCamelCase : str = original_model(lowercase_ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(lowercase_ ,outputs.logits ,atol=1e-3 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
lowerCamelCase__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 51
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = ["model.decoder.embed_positions.weights"]
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
if "emb" in name:
_UpperCamelCase : List[str] = name.replace("emb" ,"model.decoder.embed_tokens" )
if "transformer" in name:
_UpperCamelCase : Optional[int] = name.replace("transformer" ,"model.decoder" )
if "cross_attention" in name:
_UpperCamelCase : Optional[int] = name.replace("cross_attention" ,"encoder_attn" )
if "linear1" in name:
_UpperCamelCase : Optional[Any] = name.replace("linear1" ,"fc1" )
if "linear2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("linear2" ,"fc2" )
if "norm1" in name:
_UpperCamelCase : Optional[Any] = name.replace("norm1" ,"self_attn_layer_norm" )
if "norm_cross" in name:
_UpperCamelCase : Dict = name.replace("norm_cross" ,"encoder_attn_layer_norm" )
if "norm2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("norm2" ,"final_layer_norm" )
if "out_norm" in name:
_UpperCamelCase : Union[str, Any] = name.replace("out_norm" ,"model.decoder.layer_norm" )
if "linears" in name:
_UpperCamelCase : List[str] = name.replace("linears" ,"lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
_UpperCamelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" ,"enc_to_dec_proj" )
return name
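# Illustrative note: applied in sequence, the replacements above map e.g. the
# original key "transformer.layers.0.linear1.weight" to the Hugging Face key
# "model.decoder.layers.0.fc1.weight".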
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple[Dict, Dict]:
"""simple docstring"""
_UpperCamelCase : str = list(state_dict.keys() )
_UpperCamelCase : Optional[Any] = {}
for key in keys:
_UpperCamelCase : Optional[int] = state_dict.pop(lowercase_ )
_UpperCamelCase : List[Any] = rename_keys(lowercase_ )
if "in_proj_weight" in key:
# split fused qkv proj
_UpperCamelCase : Tuple = val[:hidden_size, :]
_UpperCamelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
_UpperCamelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_UpperCamelCase : Optional[Any] = val
else:
_UpperCamelCase : List[str] = val
return state_dict, enc_dec_proj_state_dict
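# Illustrative note on the split above: Audiocraft fuses the query, key and
# value projections into a single "in_proj_weight" of shape
# (3 * hidden_size, hidden_size); the three row blocks are routed to the
# separate q/k/v projection weights in that order.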
def lowercase__ ( lowercase_ ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
_UpperCamelCase : List[Any] = 1_024
_UpperCamelCase : List[str] = 24
_UpperCamelCase : Any = 16
elif checkpoint == "medium":
_UpperCamelCase : Tuple = 1_536
_UpperCamelCase : Dict = 48
_UpperCamelCase : Tuple = 24
elif checkpoint == "large":
_UpperCamelCase : int = 2_048
_UpperCamelCase : Optional[int] = 48
_UpperCamelCase : Dict = 32
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
_UpperCamelCase : str = MusicgenDecoderConfig(
hidden_size=lowercase_ ,ffn_dim=hidden_size * 4 ,num_hidden_layers=lowercase_ ,num_attention_heads=lowercase_ ,)
return config
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_="cpu" ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : str = MusicGen.get_pretrained(lowercase_ ,device=lowercase_ )
_UpperCamelCase : Union[str, Any] = decoder_config_from_checkpoint(lowercase_ )
_UpperCamelCase : Optional[int] = fairseq_model.lm.state_dict()
_UpperCamelCase, _UpperCamelCase : Optional[Any] = rename_state_dict(
lowercase_ ,hidden_size=decoder_config.hidden_size )
_UpperCamelCase : Tuple = TaEncoderModel.from_pretrained("t5-base" )
_UpperCamelCase : Union[str, Any] = EncodecModel.from_pretrained("facebook/encodec_32khz" )
_UpperCamelCase : str = MusicgenForCausalLM(lowercase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_UpperCamelCase, _UpperCamelCase : str = decoder.load_state_dict(lowercase_ ,strict=lowercase_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase_ )
if len(lowercase_ ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(lowercase_ ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
_UpperCamelCase : str = MusicgenForConditionalGeneration(text_encoder=lowercase_ ,audio_encoder=lowercase_ ,decoder=lowercase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase_ )
# check we can do a forward pass
_UpperCamelCase : List[str] = torch.arange(0 ,8 ,dtype=torch.long ).reshape(2 ,-1 )
_UpperCamelCase : Dict = input_ids.reshape(2 * 4 ,-1 )
with torch.no_grad():
_UpperCamelCase : Tuple = model(input_ids=lowercase_ ,decoder_input_ids=lowercase_ ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
_UpperCamelCase : int = AutoTokenizer.from_pretrained("t5-base" )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" ,padding_side="left" )
_UpperCamelCase : Optional[int] = MusicgenProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
# set the appropriate bos/pad token ids
_UpperCamelCase : str = 2_048
_UpperCamelCase : str = 2_048
# set other default generation config params
_UpperCamelCase : Optional[Any] = int(30 * audio_encoder.config.frame_rate )
_UpperCamelCase : List[str] = True
_UpperCamelCase : int = 3.0
if pytorch_dump_folder is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(lowercase_ )
processor.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowerCamelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 51
| 1
|
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : str , __a : Dict , __a : Union[str, Any]=99 , __a : Union[str, Any]=13 , __a : Tuple=7 , __a : List[Any]=9 , __a : Any=True , __a : List[Any]=True , __a : Optional[int]=False , __a : List[Any]=32 , __a : int=5 , __a : Dict=4 , __a : Any=37 , __a : int=8 , __a : List[str]=0.1 , __a : Optional[Any]=0.0_02 , __a : Optional[int]=1 , __a : List[Any]=0 , __a : int=0 , __a : Optional[int]=None , __a : int=None , ) -> Optional[Any]:
_UpperCamelCase : int = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : Any = encoder_seq_length
_UpperCamelCase : Union[str, Any] = decoder_seq_length
# For common tests
_UpperCamelCase : List[Any] = self.decoder_seq_length
_UpperCamelCase : Dict = is_training
_UpperCamelCase : Optional[Any] = use_attention_mask
_UpperCamelCase : Tuple = use_labels
_UpperCamelCase : Union[str, Any] = vocab_size
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Union[str, Any] = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Optional[int] = d_ff
_UpperCamelCase : Any = relative_attention_num_buckets
_UpperCamelCase : Union[str, Any] = dropout_rate
_UpperCamelCase : Dict = initializer_factor
_UpperCamelCase : Optional[Any] = eos_token_id
_UpperCamelCase : Optional[int] = pad_token_id
_UpperCamelCase : Union[str, Any] = decoder_start_token_id
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : int = decoder_layers
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
return TaConfig.from_pretrained("google/umt5-base" )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Optional[int] , __a : str , __a : Any , __a : Dict=None , __a : Optional[int]=None , __a : Tuple=None , __a : Optional[int]=None , __a : Tuple=None , ) -> str:
if attention_mask is None:
_UpperCamelCase : List[str] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_UpperCamelCase : List[str] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_UpperCamelCase : Optional[Any] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__a )
if decoder_head_mask is None:
_UpperCamelCase : str = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__a )
if cross_attn_head_mask is None:
_UpperCamelCase : Union[str, Any] = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__a )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
_UpperCamelCase : str = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# We need to clamp the input ids here to avoid having pad tokens in between.
# This is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and the rest are between 2..seq_length,
# where the seq_length here is seq_length - num_pad_tokens.
# When using past, there is no way of knowing whether the past input ids had
# pad tokens in them, which results in an incorrect seq_length and, in turn,
# in position_ids being off by num_pad_tokens in the past input.
_UpperCamelCase : int = input_ids.clamp(self.pad_token_id + 1 )
_UpperCamelCase : str = decoder_input_ids.clamp(self.pad_token_id + 1 )
_UpperCamelCase : Optional[Any] = self.get_config()
_UpperCamelCase : Dict = config.num_attention_heads
_UpperCamelCase : Optional[Any] = self.prepare_inputs_dict(__a , __a , __a )
return config, input_dict
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
_UpperCamelCase, _UpperCamelCase : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : str , __a : Optional[Any] , __a : List[str] , __a : List[str] , __a : List[str] , __a : List[Any] , ) -> Union[str, Any]:
_UpperCamelCase : Optional[int] = UMTaModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Dict = model(
input_ids=__a , decoder_input_ids=__a , attention_mask=__a , decoder_attention_mask=__a , )
_UpperCamelCase : Dict = model(input_ids=__a , decoder_input_ids=__a )
_UpperCamelCase : List[str] = result.last_hidden_state
_UpperCamelCase : str = result.past_key_values
_UpperCamelCase : List[Any] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__a ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] , __a : Any , __a : Tuple , __a : Union[str, Any] , __a : Optional[int] , ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = UMTaModel(config=__a ).get_decoder().to(__a ).eval()
# first forward pass
_UpperCamelCase : Dict = model(__a , use_cache=__a )
_UpperCamelCase : Dict = model(__a )
_UpperCamelCase : str = model(__a , use_cache=__a )
self.parent.assertTrue(len(__a ) == len(__a ) )
self.parent.assertTrue(len(__a ) == len(__a ) + 1 )
_UpperCamelCase, _UpperCamelCase : List[str] = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
_UpperCamelCase : List[str] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append the hypothetical next token to input_ids
_UpperCamelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCamelCase : Optional[Any] = model(__a )["last_hidden_state"]
_UpperCamelCase : Union[str, Any] = model(__a , past_key_values=__a )["last_hidden_state"]
# select random slice
_UpperCamelCase : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCamelCase : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCamelCase : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : int , __a : Union[str, Any] , ) -> Dict:
_UpperCamelCase : Tuple = UMTaModel(config=__a ).to(__a ).half().eval()
_UpperCamelCase : List[Any] = model(**__a )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(__a ).any().item() )
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE__ :Dict = (UMTaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ :int = (
{
"conversational": UMTaForConditionalGeneration,
"feature-extraction": UMTaModel,
"summarization": UMTaForConditionalGeneration,
"text2text-generation": UMTaForConditionalGeneration,
"translation": UMTaForConditionalGeneration,
"question-answering": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Dict = True
SCREAMING_SNAKE_CASE__ :Dict = False
SCREAMING_SNAKE_CASE__ :Optional[int] = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = True
SCREAMING_SNAKE_CASE__ :List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
SCREAMING_SNAKE_CASE__ :Union[str, Any] = [0.8, 0.9]
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Optional[int] = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
_UpperCamelCase : int = UMTaModel(config_and_inputs[0] ).to(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__a , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=__a , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
_UpperCamelCase : int = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
_UpperCamelCase : List[Any] = config_and_inputs[0]
_UpperCamelCase : Optional[Any] = UMTaForConditionalGeneration(__a ).eval()
model.to(__a )
_UpperCamelCase : Dict = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=__a ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=__a ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=__a ),
}
for attn_name, (name, mask) in zip(__a , head_masking.items() ):
_UpperCamelCase : List[Any] = {name: mask}
# Explicitly pass decoder_head_mask as it is required by the T5 model when head_mask is specified
if name == "head_mask":
_UpperCamelCase : int = torch.ones(
config.num_decoder_layers , config.num_heads , device=__a )
_UpperCamelCase : Any = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=__a , return_dict_in_generate=__a , **__a , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_UpperCamelCase : Dict = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
_UpperCamelCase : Dict = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=__a ).to(__a )
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=__a , legacy=__a )
_UpperCamelCase : List[Any] = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
_UpperCamelCase : Tuple = tokenizer(__a , return_tensors="pt" , padding=__a ).input_ids
# fmt: off
_UpperCamelCase : Tuple = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__a , __a )
_UpperCamelCase : Tuple = model.generate(input_ids.to(__a ) )
_UpperCamelCase : Union[str, Any] = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
_UpperCamelCase : List[str] = tokenizer.batch_decode(__a )
self.assertEqual(__a , __a )
| 51
|
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
lowerCamelCase__ = input("Enter image url: ").strip()
print(f"""Downloading image from {url} ...""")
lowerCamelCase__ = BeautifulSoup(requests.get(url).content, "html.parser")
# The image URL is in the content field of the first meta tag with property og:image
lowerCamelCase__ = soup.find("meta", {"property": "og:image"})["content"]
lowerCamelCase__ = requests.get(image_url).content
lowerCamelCase__ = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
| 51
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[int] , __a : Any , ) -> Tuple:
_UpperCamelCase : Dict = parent
_UpperCamelCase : Dict = 13
_UpperCamelCase : Dict = 7
_UpperCamelCase : List[Any] = True
_UpperCamelCase : List[Any] = True
_UpperCamelCase : str = True
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : Optional[int] = True
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : List[str] = False
_UpperCamelCase : List[str] = False
_UpperCamelCase : List[Any] = 2
_UpperCamelCase : int = 99
_UpperCamelCase : str = 0
_UpperCamelCase : Tuple = 32
_UpperCamelCase : Tuple = 2
_UpperCamelCase : str = 4
_UpperCamelCase : int = 0.1
_UpperCamelCase : List[Any] = 0.1
_UpperCamelCase : Any = 512
_UpperCamelCase : Dict = 16
_UpperCamelCase : Dict = 2
_UpperCamelCase : Tuple = 0.02
_UpperCamelCase : Any = 3
_UpperCamelCase : Optional[int] = 4
_UpperCamelCase : Tuple = "last"
_UpperCamelCase : List[str] = True
_UpperCamelCase : Union[str, Any] = None
_UpperCamelCase : Optional[int] = 0
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
_UpperCamelCase : Any = None
if self.use_input_lengths:
_UpperCamelCase : Any = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_UpperCamelCase : str = None
if self.use_token_type_ids:
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_UpperCamelCase : Dict = None
_UpperCamelCase : Union[str, Any] = None
_UpperCamelCase : int = None
if self.use_labels:
_UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase : Dict = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
_UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase : Optional[int] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Dict , __a : int , __a : Optional[Any] , __a : Any , __a : Optional[int] , __a : Tuple , __a : Any , __a : Optional[Any] , __a : Union[str, Any] , ) -> Any:
_UpperCamelCase : List[Any] = TFFlaubertModel(config=__a )
_UpperCamelCase : Any = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
_UpperCamelCase : List[Any] = model(__a )
_UpperCamelCase : Tuple = [input_ids, input_mask]
_UpperCamelCase : Dict = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Any , __a : List[str] , __a : Optional[Any] , __a : Dict , __a : Optional[int] , __a : List[Any] , __a : Optional[Any] , __a : Any , __a : Any , ) -> Dict:
_UpperCamelCase : Optional[int] = TFFlaubertWithLMHeadModel(__a )
_UpperCamelCase : List[Any] = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
_UpperCamelCase : List[str] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Any , __a : Tuple , __a : List[str] , __a : Tuple , __a : List[str] , __a : Optional[Any] , __a : List[Any] , __a : List[str] , __a : List[str] , ) -> List[str]:
_UpperCamelCase : str = TFFlaubertForQuestionAnsweringSimple(__a )
_UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "lengths": input_lengths}
_UpperCamelCase : Union[str, Any] = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : List[Any] , __a : Optional[Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] , __a : Dict , __a : Union[str, Any] , __a : int , __a : Tuple , ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = TFFlaubertForSequenceClassification(__a )
_UpperCamelCase : int = {"input_ids": input_ids, "lengths": input_lengths}
_UpperCamelCase : Optional[int] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Union[str, Any] , __a : str , __a : List[Any] , __a : int , __a : Any , __a : Union[str, Any] , __a : List[Any] , __a : Union[str, Any] , __a : str , ) -> Any:
_UpperCamelCase : Optional[Any] = self.num_labels
_UpperCamelCase : List[str] = TFFlaubertForTokenClassification(config=__a )
_UpperCamelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_UpperCamelCase : int = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Tuple , __a : Tuple , __a : Union[str, Any] , __a : Optional[int] , __a : Tuple , __a : Any , __a : int , __a : List[Any] , __a : List[str] , ) -> str:
_UpperCamelCase : str = self.num_choices
_UpperCamelCase : Union[str, Any] = TFFlaubertForMultipleChoice(config=__a )
_UpperCamelCase : Optional[Any] = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
_UpperCamelCase : int = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
_UpperCamelCase : List[Any] = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
_UpperCamelCase : Optional[int] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_UpperCamelCase : Optional[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
_UpperCamelCase : str = self.prepare_config_and_inputs()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Tuple = config_and_inputs
_UpperCamelCase : Any = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"langs": token_type_ids,
"lengths": input_lengths,
}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE__ :Any = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
SCREAMING_SNAKE_CASE__ :List[Any] = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ :List[str] = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = False
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Union[str, Any] , __a : Dict , __a : str , __a : List[Any] , __a : Dict ) -> Optional[int]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
_UpperCamelCase : Union[str, Any] = TFFlaubertModelTester(self )
_UpperCamelCase : Tuple = ConfigTester(self , config_class=__a , emb_dim=37 )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Any:
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Union[str, Any] = TFFlaubertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_tf
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
_UpperCamelCase : Union[str, Any] = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
_UpperCamelCase : List[Any] = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
_UpperCamelCase : Union[str, Any] = model(__a )[0]
_UpperCamelCase : int = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , __a )
# compare the actual values for a slice.
_UpperCamelCase : Tuple = tf.convert_to_tensor(
[
[
[-1.8_76_87_73, -1.56_65_55, 0.27_07_24_18],
[-1.6_92_00_38, -0.5_87_35_05, 1.9_32_95_99],
[-2.9_56_39_85, -1.6_99_38_35, 1.7_97_20_52],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 51
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import work (when the Python process is running from the root of the repo)
sys.path.append(".")
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Any = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
F'''{test_file} instead.''' )
_UpperCamelCase : str = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
_UpperCamelCase : Dict = components[:-1] + [test_fn.replace(".py" ,"" )]
_UpperCamelCase : List[str] = ".".join(lowercase_ )
return test_module_path
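# Illustrative worked example: "tests/models/bert/test_modeling_bert.py" maps
# to the module path "tests.models.bert.test_modeling_bert".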
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = get_module_path(lowercase_ )
_UpperCamelCase : str = importlib.import_module(lowercase_ )
return test_module
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : List[Any] = get_test_module(lowercase_ )
for attr in dir(lowercase_ ):
if attr.endswith("ModelTester" ):
tester_classes.append(getattr(lowercase_ ,lowercase_ ) )
# sort by class name
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : Any = get_test_module(lowercase_ )
for attr in dir(lowercase_ ):
_UpperCamelCase : int = getattr(lowercase_ ,lowercase_ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test modules. Let's exclude them by checking
# that `all_model_classes` is not empty (which also excludes other special classes).
_UpperCamelCase : Optional[Any] = getattr(lowercase_ ,"all_model_classes" ,[] )
if len(lowercase_ ) > 0:
test_classes.append(lowercase_ )
# sort by class name
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Dict = get_test_classes(lowercase_ )
_UpperCamelCase : int = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort by class name
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = test_class()
if hasattr(lowercase_ ,"setUp" ):
test.setUp()
_UpperCamelCase : Tuple = None
if hasattr(lowercase_ ,"model_tester" ):
# `(TF/Flax)ModelTesterMixin` sets this attribute to `None` by default. Let's skip this case.
if test.model_tester is not None:
_UpperCamelCase : Tuple = test.model_tester.__class__
return model_tester
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = get_test_classes(lowercase_ )
_UpperCamelCase : Dict = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(lowercase_ )
# sort by class name
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Any = get_test_classes_for_model(lowercase_ ,lowercase_ )
_UpperCamelCase : List[Any] = []
for test_class in test_classes:
_UpperCamelCase : List[Any] = get_model_tester_from_test_class(lowercase_ )
if tester_class is not None:
tester_classes.append(lowercase_ )
# sort by class name
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Any = get_test_classes(lowercase_ )
_UpperCamelCase : Tuple = {test_class: get_model_tester_from_test_class(lowercase_ ) for test_class in test_classes}
return test_tester_mapping
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_model_classes(lowercase_ )
_UpperCamelCase : Optional[int] = {
model_class: get_test_classes_for_model(lowercase_ ,lowercase_ ) for model_class in model_classes
}
return model_test_mapping
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = get_model_classes(lowercase_ )
_UpperCamelCase : Tuple = {
model_class: get_tester_classes_for_model(lowercase_ ,lowercase_ ) for model_class in model_classes
}
return model_to_tester_mapping
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
if isinstance(lowercase_ ,lowercase_ ):
return o
elif isinstance(lowercase_ ,lowercase_ ):
return o.__name__
elif isinstance(lowercase_ ,(list, tuple) ):
return [to_json(lowercase_ ) for x in o]
elif isinstance(lowercase_ ,lowercase_ ):
return {to_json(lowercase_ ): to_json(lowercase_ ) for k, v in o.items()}
else:
return o
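# Illustrative note (hypothetical classes): to_json recursively replaces class
# objects with their names, so a mapping like {BertModel: [BertModelTest]}
# would serialize to {"BertModel": ["BertModelTest"]}.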
| 51
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = MgpstrTokenizer
SCREAMING_SNAKE_CASE__ :Tuple = False
SCREAMING_SNAKE_CASE__ :Tuple = {}
SCREAMING_SNAKE_CASE__ :Optional[int] = False
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
super().setUp()
# fmt: off
_UpperCamelCase : Tuple = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_UpperCamelCase : List[str] = dict(zip(__a , range(len(__a ) ) ) )
_UpperCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , **__a : Any ) -> Optional[int]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Optional[int] ) -> Tuple:
_UpperCamelCase : Tuple = "tester"
_UpperCamelCase : Optional[int] = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase : Optional[int] = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : Tuple = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
_UpperCamelCase : List[str] = tokenizer.encode([special_token] , add_special_tokens=__a )
self.assertEqual(len(__a ) , 1 )
_UpperCamelCase : str = tokenizer.decode(__a , skip_special_tokens=__a )
self.assertTrue(special_token not in decoded )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
_UpperCamelCase : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase, _UpperCamelCase : Dict = self.get_input_output_texts(__a )
_UpperCamelCase : Tuple = tokenizer.tokenize(__a )
_UpperCamelCase : Dict = tokenizer.convert_tokens_to_ids(__a )
_UpperCamelCase : str = tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Dict = tokenizer.convert_ids_to_tokens(__a )
self.assertNotEqual(len(__a ) , 0 )
_UpperCamelCase : Dict = tokenizer.decode(__a )
self.assertIsInstance(__a , __a )
self.assertEqual(text_a.replace(" " , "" ) , __a )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
pass
| 51
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 51
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
lowerCamelCase__ = "2020.9.26"
lowerCamelCase__ = "xcodz-dot, cclaus, dhruvmanila"
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> tuple[float, float]:
"""simple docstring"""
if not all(isinstance(lowercase_ ,(float, int) ) for val in locals().values() ):
_UpperCamelCase : Any = F'''Input values must either be float or int: {list(locals().values() )}'''
raise TypeError(lowercase_ )
_UpperCamelCase : Tuple = ((x * distance) / (z + distance)) * scale
_UpperCamelCase : Union[str, Any] = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
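# Illustrative worked example: for (x, y, z, scale, distance) =
# (1.0, 2.0, 3.0, 10.0, 10.0) the projection above gives
# ((1 * 10) / (3 + 10) * 10, (2 * 10) / (3 + 10) * 10) ~= (7.6923, 15.3846).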
def rotate(x, y, z, axis, angle) -> tuple[float, float, float]:
    """simple docstring"""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        error_message = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(error_message)
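    # Reduce the angle into [0, 360) and rescale it. Note this uses the factor
    # 180 / (450 * pi) rather than the standard degrees-to-radians factor
    # pi / 180; kept as-is to match the reference implementation's doctests.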
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{convert_to_ad(1.0, 2.0, 3.0, 1_0.0, 1_0.0) = }""")
print(f"""{rotate(1.0, 2.0, 3.0, "y", 9_0.0) = }""")
"""simple docstring"""
lowerCamelCase__ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
"""simple docstring"""
def bin_to_octal(bin_string: str) -> str:
    """simple docstring"""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
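    # Example: "1111" is left-padded to "001111" and split into ["001", "111"];
    # each 3-bit group then maps to one octal digit, giving "17".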
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
_UpperCamelCase : Tuple = tempfile.mkdtemp()
_UpperCamelCase : str = 5
# Realm tok
_UpperCamelCase : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
_UpperCamelCase : Optional[Any] = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase : Optional[Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
_UpperCamelCase : Any = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
_UpperCamelCase : int = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
] , dtype=__a , )
return block_records
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
_UpperCamelCase : List[str] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
_UpperCamelCase : Tuple = self.get_config()
_UpperCamelCase : int = self.get_dummy_retriever()
_UpperCamelCase : Tuple = retriever.tokenizer
_UpperCamelCase : List[str] = np.array([0, 3] , dtype="long" )
_UpperCamelCase : Union[str, Any] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : List[str] = tokenizer(
["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : str = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase : Any = self.get_config()
_UpperCamelCase : Dict = self.get_dummy_retriever()
_UpperCamelCase : Dict = retriever.tokenizer
_UpperCamelCase : List[Any] = np.array([0, 3, 5] , dtype="long" )
_UpperCamelCase : Optional[int] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : str = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : Union[str, Any] = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual([False, True, True] , __a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
_UpperCamelCase : int = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
_UpperCamelCase : List[Any] = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
_UpperCamelCase : int = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def load_orig_config_file ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
print("Loading config file..." )
def flatten_yaml_as_dict(lowercase_ ,lowercase_="" ,lowercase_="." ):
_UpperCamelCase : int = []
for k, v in d.items():
_UpperCamelCase : Optional[int] = parent_key + sep + k if parent_key else k
if isinstance(lowercase_ ,collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(lowercase_ ,lowercase_ ,sep=lowercase_ ).items() )
else:
items.append((new_key, v) )
return dict(lowercase_ )
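    # flatten_yaml_as_dict example: {"model": {"name": "mobilevit_v2"}} -> {"model.name": "mobilevit_v2"}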
_UpperCamelCase : Optional[int] = argparse.Namespace()
with open(lowercase_ ,"r" ) as yaml_file:
try:
_UpperCamelCase : str = yaml.load(lowercase_ ,Loader=yaml.FullLoader )
_UpperCamelCase : Union[str, Any] = flatten_yaml_as_dict(lowercase_ )
for k, v in flat_cfg.items():
setattr(lowercase_ ,lowercase_ ,lowercase_ )
except yaml.YAMLError as exc:
logger.error("Error while loading config file: {}. Error message: {}".format(lowercase_ ,str(lowercase_ ) ) )
return config
def get_mobilevitva_config ( task_name ,orig_cfg_file ):
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = MobileViTVaConfig()
_UpperCamelCase : List[str] = False
# dataset
if task_name.startswith("imagenet1k_" ):
_UpperCamelCase : List[str] = 1_000
if int(task_name.strip().split("_" )[-1] ) == 384:
_UpperCamelCase : Optional[Any] = 384
else:
_UpperCamelCase : Dict = 256
_UpperCamelCase : List[Any] = "imagenet-1k-id2label.json"
elif task_name.startswith("imagenet21k_to_1k_" ):
_UpperCamelCase : Tuple = 21_000
if int(task_name.strip().split("_" )[-1] ) == 384:
_UpperCamelCase : int = 384
else:
_UpperCamelCase : List[Any] = 256
_UpperCamelCase : Any = "imagenet-22k-id2label.json"
elif task_name.startswith("ade20k_" ):
_UpperCamelCase : Union[str, Any] = 151
_UpperCamelCase : List[str] = 512
_UpperCamelCase : int = "ade20k-id2label.json"
_UpperCamelCase : Optional[Any] = True
elif task_name.startswith("voc_" ):
_UpperCamelCase : Any = 21
_UpperCamelCase : Any = 512
_UpperCamelCase : Dict = "pascal-voc-id2label.json"
_UpperCamelCase : Union[str, Any] = True
# orig_config
_UpperCamelCase : Dict = load_orig_config_file(lowercase_ )
assert getattr(lowercase_ ,"model.classification.name" ,-1 ) == "mobilevit_v2", "Invalid model"
_UpperCamelCase : Optional[int] = getattr(lowercase_ ,"model.classification.mitv2.width_multiplier" ,1.0 )
assert (
getattr(lowercase_ ,"model.classification.mitv2.attn_norm_layer" ,-1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_UpperCamelCase : List[str] = getattr(lowercase_ ,"model.classification.activation.name" ,"swish" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_UpperCamelCase : Optional[int] = getattr(lowercase_ ,"model.segmentation.output_stride" ,16 )
if "_deeplabv3" in task_name:
_UpperCamelCase : Tuple = getattr(lowercase_ ,"model.segmentation.deeplabv3.aspp_rates" ,[12, 24, 36] )
_UpperCamelCase : Any = getattr(lowercase_ ,"model.segmentation.deeplabv3.aspp_out_channels" ,512 )
_UpperCamelCase : Dict = getattr(lowercase_ ,"model.segmentation.deeplabv3.aspp_dropout" ,0.1 )
# id2label
_UpperCamelCase : List[str] = "huggingface/label-files"
_UpperCamelCase : Dict = json.load(open(hf_hub_download(lowercase_ ,lowercase_ ,repo_type="dataset" ) ,"r" ) )
_UpperCamelCase : Any = {int(lowercase_ ): v for k, v in idalabel.items()}
_UpperCamelCase : Any = idalabel
_UpperCamelCase : Optional[int] = {v: k for k, v in idalabel.items()}
return config
def rename_key ( dct ,old ,new ) -> None:
"""simple docstring"""
_UpperCamelCase : Any = dct.pop(lowercase_ )
_UpperCamelCase : Union[str, Any] = val
def create_rename_keys ( state_dict ,base_model=False ):
"""simple docstring"""
if base_model:
_UpperCamelCase : Dict = ""
else:
_UpperCamelCase : str = "mobilevitv2."
_UpperCamelCase : Any = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_UpperCamelCase : str = k[8:]
else:
_UpperCamelCase : Dict = k
if ".block." in k:
_UpperCamelCase : Union[str, Any] = k_new.replace(".block." ,"." )
if ".conv." in k:
_UpperCamelCase : Tuple = k_new.replace(".conv." ,".convolution." )
if ".norm." in k:
_UpperCamelCase : Tuple = k_new.replace(".norm." ,".normalization." )
if "conv_1." in k:
_UpperCamelCase : Optional[Any] = k_new.replace("conv_1." ,F'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if F'''layer_{i}.''' in k:
_UpperCamelCase : Any = k_new.replace(F'''layer_{i}.''' ,F'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
_UpperCamelCase : Tuple = k_new.replace(".exp_1x1." ,".expand_1x1." )
if ".red_1x1." in k:
_UpperCamelCase : Optional[int] = k_new.replace(".red_1x1." ,".reduce_1x1." )
for i in [3, 4, 5]:
if F'''layer_{i}.0.''' in k:
_UpperCamelCase : Any = k_new.replace(F'''layer_{i}.0.''' ,F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if F'''layer_{i}.1.local_rep.0.''' in k:
_UpperCamelCase : List[str] = k_new.replace(F'''layer_{i}.1.local_rep.0.''' ,F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if F'''layer_{i}.1.local_rep.1.''' in k:
_UpperCamelCase : int = k_new.replace(F'''layer_{i}.1.local_rep.1.''' ,F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
_UpperCamelCase : Union[str, Any] = [0, 1]
elif i == 4:
_UpperCamelCase : Tuple = [0, 1, 2, 3]
elif i == 5:
_UpperCamelCase : List[str] = [0, 1, 2]
for j in j_in:
if F'''layer_{i}.1.global_rep.{j}.''' in k:
_UpperCamelCase : str = k_new.replace(
F'''layer_{i}.1.global_rep.{j}.''' ,F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
_UpperCamelCase : Dict = k_new.replace(
F'''layer_{i}.1.global_rep.{j+1}.''' ,F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if F'''layer_{i}.1.conv_proj.''' in k:
_UpperCamelCase : Optional[Any] = k_new.replace(F'''layer_{i}.1.conv_proj.''' ,F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
_UpperCamelCase : Dict = k_new.replace("pre_norm_attn.0." ,"layernorm_before." )
if "pre_norm_attn.1." in k:
_UpperCamelCase : List[str] = k_new.replace("pre_norm_attn.1." ,"attention." )
if "pre_norm_ffn.0." in k:
_UpperCamelCase : Optional[int] = k_new.replace("pre_norm_ffn.0." ,"layernorm_after." )
if "pre_norm_ffn.1." in k:
_UpperCamelCase : int = k_new.replace("pre_norm_ffn.1." ,"ffn.conv1." )
if "pre_norm_ffn.3." in k:
_UpperCamelCase : Any = k_new.replace("pre_norm_ffn.3." ,"ffn.conv2." )
if "classifier.1." in k:
_UpperCamelCase : int = k_new.replace("classifier.1." ,"classifier." )
if "seg_head." in k:
_UpperCamelCase : Tuple = k_new.replace("seg_head." ,"segmentation_head." )
if ".aspp_layer." in k:
_UpperCamelCase : List[Any] = k_new.replace(".aspp_layer." ,"." )
if ".aspp_pool." in k:
_UpperCamelCase : Tuple = k_new.replace(".aspp_pool." ,"." )
rename_keys.append((k, k_new) )
return rename_keys
def remove_unused_keys ( state_dict ) -> None:
"""simple docstring"""
_UpperCamelCase : Dict = []
for k in state_dict.keys():
if k.startswith("seg_head.aux_head." ):
keys_to_ignore.append(lowercase_ )
for k in keys_to_ignore:
state_dict.pop(lowercase_ ,lowercase_ )
def prepare_img ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_UpperCamelCase : Union[str, Any] = Image.open(requests.get(lowercase_ ,stream=lowercase_ ).raw )
return im
@torch.no_grad()
def convert_mobilevitva_checkpoint ( task_name ,checkpoint_path ,orig_config_path ,pytorch_dump_folder_path ):
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = get_mobilevitva_config(lowercase_ ,lowercase_ )
# load original state_dict
_UpperCamelCase : str = torch.load(lowercase_ ,map_location="cpu" )
# load huggingface model
if task_name.startswith("ade20k_" ) or task_name.startswith("voc_" ):
_UpperCamelCase : Optional[int] = MobileViTVaForSemanticSegmentation(lowercase_ ).eval()
_UpperCamelCase : Any = False
else:
_UpperCamelCase : List[Any] = MobileViTVaForImageClassification(lowercase_ ).eval()
_UpperCamelCase : List[str] = False
# remove and rename some keys of load the original model
_UpperCamelCase : Dict = checkpoint
remove_unused_keys(lowercase_ )
_UpperCamelCase : Optional[Any] = create_rename_keys(lowercase_ ,base_model=lowercase_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowercase_ ,lowercase_ ,lowercase_ )
# load modified state_dict
model.load_state_dict(lowercase_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
_UpperCamelCase : Tuple = MobileViTImageProcessor(crop_size=config.image_size ,size=config.image_size + 32 )
_UpperCamelCase : Dict = image_processor(images=prepare_img() ,return_tensors="pt" )
_UpperCamelCase : int = model(**lowercase_ )
# verify classification model
if task_name.startswith("imagenet" ):
_UpperCamelCase : Tuple = outputs.logits
_UpperCamelCase : Tuple = logits.argmax(-1 ).item()
print("Predicted class:" ,model.config.idalabel[predicted_class_idx] )
if task_name.startswith("imagenet1k_256" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
_UpperCamelCase : Optional[int] = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] )
assert torch.allclose(logits[0, :3] ,lowercase_ ,atol=1e-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
lowerCamelCase__ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = LEDConfig
SCREAMING_SNAKE_CASE__ :str = {}
SCREAMING_SNAKE_CASE__ :List[str] = "gelu"
def __init__( self : List[Any] , __a : Union[str, Any] , __a : List[Any]=13 , __a : int=7 , __a : str=True , __a : Any=False , __a : str=99 , __a : str=32 , __a : Union[str, Any]=2 , __a : Optional[Any]=4 , __a : List[Any]=37 , __a : List[Any]=0.1 , __a : Tuple=0.1 , __a : Dict=20 , __a : str=2 , __a : Dict=1 , __a : Any=0 , __a : List[Any]=4 , ) -> List[Any]:
_UpperCamelCase : Optional[Any] = parent
_UpperCamelCase : List[str] = batch_size
_UpperCamelCase : str = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : int = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : str = max_position_embeddings
_UpperCamelCase : int = eos_token_id
_UpperCamelCase : Dict = pad_token_id
_UpperCamelCase : Optional[Any] = bos_token_id
_UpperCamelCase : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_UpperCamelCase : List[str] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_UpperCamelCase : int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
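        # e.g. seq_length=7, attention_window=4 -> encoder_seq_length = 7 + (4 - 7 % 4) % 4 = 8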
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCamelCase : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCamelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : List[Any] = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
_UpperCamelCase : Dict = prepare_led_inputs_dict(__a , __a , __a )
_UpperCamelCase : Union[str, Any] = tf.concat(
[tf.zeros_like(__a )[:, :-1], tf.ones_like(__a )[:, -1:]] , axis=-1 , )
_UpperCamelCase : Union[str, Any] = global_attention_mask
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : int ) -> Tuple:
_UpperCamelCase : Tuple = TFLEDModel(config=__a ).get_decoder()
_UpperCamelCase : Tuple = inputs_dict["input_ids"]
_UpperCamelCase : int = input_ids[:1, :]
_UpperCamelCase : List[str] = inputs_dict["attention_mask"][:1, :]
_UpperCamelCase : List[Any] = 1
# first forward pass
_UpperCamelCase : Any = model(__a , attention_mask=__a , use_cache=__a )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCamelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCamelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_UpperCamelCase : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCamelCase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCamelCase : Tuple = model(__a , attention_mask=__a )[0]
_UpperCamelCase : int = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCamelCase : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
def prepare_led_inputs_dict ( config ,input_ids ,decoder_input_ids ,attention_mask=None ,decoder_attention_mask=None ,head_mask=None ,decoder_head_mask=None ,) -> Dict:
"""simple docstring"""
if attention_mask is None:
_UpperCamelCase : str = tf.cast(tf.math.not_equal(lowercase_ ,config.pad_token_id ) ,tf.inta )
if decoder_attention_mask is None:
_UpperCamelCase : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ),
] ,axis=-1 ,)
if head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Tuple = True
SCREAMING_SNAKE_CASE__ :str = False
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
_UpperCamelCase : int = TFLEDModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase, _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[int] = tf.zeros_like(inputs_dict["attention_mask"] )
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : str = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_UpperCamelCase : Dict = True
_UpperCamelCase : str = self.model_tester.seq_length
_UpperCamelCase : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__a : Optional[int] ):
_UpperCamelCase : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__a : Optional[Any] ):
_UpperCamelCase : Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions]
_UpperCamelCase : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = True
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Any = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
_UpperCamelCase : Optional[Any] = model_class(__a )
_UpperCamelCase : List[Any] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCamelCase : int = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : str = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
_UpperCamelCase : Any = True
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    """simple docstring"""
    return tf.constant(tok_lst, dtype=tf.int32)
lowerCamelCase__ = 1E-4
@slow
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Any = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_UpperCamelCase : int = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Tuple = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Optional[int] = model(**__a )[0]
_UpperCamelCase : Optional[int] = (1, 1024, 768)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Tuple = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_UpperCamelCase : Optional[int] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : List[str] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Union[str, Any] = model(**__a )[0]
_UpperCamelCase : int = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Optional[int] = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 , rtol=1e-3 )
"""simple docstring"""
def topological_sort(graph):
"""simple docstring"""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = RoCBertTokenizer
SCREAMING_SNAKE_CASE__ :Dict = None
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = True
SCREAMING_SNAKE_CASE__ :Union[str, Any] = filter_non_english
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
super().setUp()
_UpperCamelCase : Any = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
_UpperCamelCase : List[str] = {}
_UpperCamelCase : Tuple = {}
for i, value in enumerate(__a ):
_UpperCamelCase : List[str] = i
_UpperCamelCase : Optional[Any] = i
_UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
_UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(__a , __a , ensure_ascii=__a )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(__a , __a , ensure_ascii=__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : int = tokenizer.tokenize("你好[SEP]你是谁" )
self.assertListEqual(__a , ["你", "好", "[SEP]", "你", "是", "谁"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__a ) , [5, 6, 2, 5, 7, 8] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
_UpperCamelCase : Dict = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
_UpperCamelCase : List[Any] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
_UpperCamelCase : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Dict = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : int = RoCBertBasicTokenizer(do_lower_case=__a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_UpperCamelCase : Any = {}
for i, token in enumerate(__a ):
_UpperCamelCase : str = i
_UpperCamelCase : Optional[int] = RoCBertWordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
if self.test_rust_tokenizer:
_UpperCamelCase : Tuple = self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Union[str, Any] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_UpperCamelCase : Optional[Any] = tokenizer_r.encode_plus(
__a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
_UpperCamelCase : Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = ["的", "人", "有"]
_UpperCamelCase : int = "".join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : int = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Any = False
_UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Any = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCamelCase : Any = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : Optional[int] = tokenizer.encode("你好" , add_special_tokens=__a )
_UpperCamelCase : Dict = tokenizer.encode("你是谁" , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__a )
_UpperCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Optional[Any] = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : int = "你好,你是谁"
_UpperCamelCase : Any = tokenizer.tokenize(__a )
_UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__a )
_UpperCamelCase : List[str] = tokenizer.convert_tokens_to_shape_ids(__a )
_UpperCamelCase : Any = tokenizer.convert_tokens_to_pronunciation_ids(__a )
_UpperCamelCase : Optional[int] = tokenizer.prepare_for_model(
__a , __a , __a , add_special_tokens=__a )
_UpperCamelCase : Tuple = tokenizer.encode_plus(__a , add_special_tokens=__a )
self.assertEqual(__a , __a )
"""simple docstring"""
def infix_2_postfix(infix) -> str:
    """simple docstring"""
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
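    # Shunting-yard style conversion; e.g. "a+b*c" becomes "abc*+".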
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        "Symbol".center(8), "Stack".center(print_width), "Postfix".center(print_width), sep=" | "
    )
    print("-" * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | "
        )  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | "
        )  # Output in tabular format
    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix) -> str:
    """simple docstring"""
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = "yolos"
def __init__( self : Dict , __a : Optional[Any]=768 , __a : List[Any]=12 , __a : Any=12 , __a : List[Any]=3072 , __a : Optional[int]="gelu" , __a : Dict=0.0 , __a : Optional[Any]=0.0 , __a : Any=0.02 , __a : Optional[int]=1e-1_2 , __a : List[Any]=[512, 864] , __a : List[str]=16 , __a : str=3 , __a : Optional[Any]=True , __a : Optional[Any]=100 , __a : List[str]=True , __a : Any=False , __a : List[str]=1 , __a : str=5 , __a : Optional[Any]=2 , __a : Tuple=5 , __a : Any=2 , __a : Union[str, Any]=0.1 , **__a : List[str] , ) -> List[str]:
super().__init__(**__a )
_UpperCamelCase : Dict = hidden_size
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : Tuple = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : Any = qkv_bias
_UpperCamelCase : str = num_detection_tokens
_UpperCamelCase : str = use_mid_position_embeddings
_UpperCamelCase : List[str] = auxiliary_loss
# Hungarian matcher
_UpperCamelCase : List[Any] = class_cost
_UpperCamelCase : int = bbox_cost
_UpperCamelCase : Optional[int] = giou_cost
# Loss coefficients
_UpperCamelCase : List[Any] = bbox_loss_coefficient
_UpperCamelCase : str = giou_loss_coefficient
_UpperCamelCase : Dict = eos_coefficient
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> float:
return 1e-4
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return 12
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """simple docstring"""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """simple docstring"""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NB: "worflow_run_id" (sic) matches the keyword defined in get_ci_error_statistics.get_artifacts_links
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext, key) -> str | None:
    """simple docstring"""
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext) -> list[str]:
    """simple docstring"""
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles, common_word) -> list[str]:
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    """simple docstring"""
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
def sum_of_series(first_term, common_diff, num_of_terms) -> float:
    """simple docstring"""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
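# Worked example: sum_of_series(1, 1, 10) = (10 / 2) * (2 * 1 + 9 * 1) = 55.0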
def main() -> None:
    """simple docstring"""
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def print_max_activities(start, finish) -> None:
    """simple docstring"""
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
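    # Activities are assumed pre-sorted by finish time; expected output: 0,1,3,4,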
print_max_activities(start, finish)
"""simple docstring"""
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    '''simple docstring'''
    pass
class Node:
'''simple docstring'''
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None
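    # Loop detection by bookkeeping: iterating records every node seen so far,
    # and revisiting one raises ContainsLoopError (O(n^2) with a list, but simple).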
    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node
@property
    def has_loop(self) -> bool:
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :torch.FloatTensor
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Dict=3 , __a : Any=3 , __a : Union[str, Any]=("DownEncoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Tuple=32 , __a : int="silu" , __a : str=True , ) -> Dict:
super().__init__()
_UpperCamelCase : List[str] = layers_per_block
_UpperCamelCase : Dict = torch.nn.Convad(
__a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : int = None
_UpperCamelCase : Any = nn.ModuleList([] )
# down
_UpperCamelCase : List[str] = block_out_channels[0]
for i, down_block_type in enumerate(__a ):
_UpperCamelCase : Tuple = output_channel
_UpperCamelCase : int = block_out_channels[i]
_UpperCamelCase : int = i == len(__a ) - 1
_UpperCamelCase : Dict = get_down_block(
__a , num_layers=self.layers_per_block , in_channels=__a , out_channels=__a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , )
self.down_blocks.append(__a )
# mid
_UpperCamelCase : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# out
_UpperCamelCase : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : Any = nn.SiLU()
_UpperCamelCase : Union[str, Any] = 2 * out_channels if double_z else out_channels
_UpperCamelCase : Tuple = nn.Convad(block_out_channels[-1] , __a , 3 , padding=1 )
_UpperCamelCase : Optional[int] = False
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Dict ) -> List[str]:
_UpperCamelCase : int = x
_UpperCamelCase : Optional[int] = self.conv_in(__a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Tuple ):
def custom_forward(*__a : Any ):
return module(*__a )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , use_reentrant=__a )
# middle
_UpperCamelCase : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , use_reentrant=__a )
else:
for down_block in self.down_blocks:
_UpperCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a )
# middle
_UpperCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __a )
else:
# down
for down_block in self.down_blocks:
_UpperCamelCase : int = down_block(__a )
# middle
_UpperCamelCase : int = self.mid_block(__a )
# post-process
_UpperCamelCase : Any = self.conv_norm_out(__a )
_UpperCamelCase : Any = self.conv_act(__a )
_UpperCamelCase : Optional[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : int=3 , __a : Any=3 , __a : str=("UpDecoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Optional[int]=32 , __a : Tuple="silu" , __a : Union[str, Any]="group" , ) -> str:
super().__init__()
_UpperCamelCase : List[Any] = layers_per_block
_UpperCamelCase : Tuple = nn.Convad(
__a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = nn.ModuleList([] )
_UpperCamelCase : List[Any] = in_channels if norm_type == "spatial" else None
# mid
_UpperCamelCase : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# up
_UpperCamelCase : List[str] = list(reversed(__a ) )
_UpperCamelCase : int = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__a ):
_UpperCamelCase : int = output_channel
_UpperCamelCase : Union[str, Any] = reversed_block_out_channels[i]
_UpperCamelCase : Optional[Any] = i == len(__a ) - 1
_UpperCamelCase : Union[str, Any] = get_up_block(
__a , num_layers=self.layers_per_block + 1 , in_channels=__a , out_channels=__a , prev_output_channel=__a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , resnet_time_scale_shift=__a , )
self.up_blocks.append(__a )
_UpperCamelCase : Optional[Any] = output_channel
# out
if norm_type == "spatial":
_UpperCamelCase : Optional[int] = SpatialNorm(block_out_channels[0] , __a )
else:
_UpperCamelCase : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : str = nn.SiLU()
_UpperCamelCase : str = nn.Convad(block_out_channels[0] , __a , 3 , padding=1 )
_UpperCamelCase : Dict = False
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[Any] , __a : Union[str, Any]=None ) -> Tuple:
_UpperCamelCase : List[str] = z
_UpperCamelCase : Dict = self.conv_in(__a )
_UpperCamelCase : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Any ):
def custom_forward(*__a : Tuple ):
return module(*__a )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a , use_reentrant=__a )
_UpperCamelCase : Optional[int] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , __a , use_reentrant=__a )
else:
# middle
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a )
_UpperCamelCase : Union[str, Any] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a , __a )
else:
# middle
_UpperCamelCase : str = self.mid_block(__a , __a )
_UpperCamelCase : int = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : Any = up_block(__a , __a )
# post-process
if latent_embeds is None:
_UpperCamelCase : List[str] = self.conv_norm_out(__a )
else:
_UpperCamelCase : Optional[int] = self.conv_norm_out(__a , __a )
_UpperCamelCase : Tuple = self.conv_act(__a )
_UpperCamelCase : List[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple , __a : List[str] , __a : List[str] , __a : str=None , __a : Optional[int]="random" , __a : Any=False , __a : Optional[Any]=True ) -> List[Any]:
super().__init__()
_UpperCamelCase : Tuple = n_e
_UpperCamelCase : Tuple = vq_embed_dim
_UpperCamelCase : Union[str, Any] = beta
_UpperCamelCase : str = legacy
_UpperCamelCase : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_UpperCamelCase : Any = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
_UpperCamelCase : Dict = self.used.shape[0]
_UpperCamelCase : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_UpperCamelCase : Optional[int] = self.re_embed
_UpperCamelCase : Any = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
_UpperCamelCase : Union[str, Any] = n_e
_UpperCamelCase : List[str] = sane_index_shape
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : str = inds.shape
        assert len(ishape ) > 1
_UpperCamelCase : Union[str, Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[Any] = self.used.to(__a )
_UpperCamelCase : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
_UpperCamelCase : Optional[Any] = match.argmax(-1 )
_UpperCamelCase : Any = match.sum(2 ) < 1
if self.unknown_index == "random":
_UpperCamelCase : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_UpperCamelCase : Dict = self.unknown_index
return new.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[int] ) -> Optional[int]:
_UpperCamelCase : int = inds.shape
        assert len(ishape ) > 1
_UpperCamelCase : List[Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[int] = self.used.to(__a )
if self.re_embed > self.used.shape[0]: # extra token
_UpperCamelCase : int = 0 # simply set to zero
_UpperCamelCase : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __a )
return back.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : str ) -> Optional[int]:
# reshape z -> (batch, height, width, channel) and flatten
_UpperCamelCase : List[str] = z.permute(0 , 2 , 3 , 1 ).contiguous()
_UpperCamelCase : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
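        # torch.cdist evaluates those pairwise Euclidean distances directly;
        # the algebraic expansion above is kept only as a reference identity.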
_UpperCamelCase : Optional[int] = torch.argmin(torch.cdist(__a , self.embedding.weight ) , dim=1 )
_UpperCamelCase : int = self.embedding(__a ).view(z.shape )
_UpperCamelCase : str = None
_UpperCamelCase : Any = None
# compute loss for embedding
if not self.legacy:
_UpperCamelCase : List[str] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_UpperCamelCase : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_UpperCamelCase : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
_UpperCamelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_UpperCamelCase : Tuple = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_UpperCamelCase : Dict = self.remap_to_used(__a )
_UpperCamelCase : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_UpperCamelCase : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[str] , __a : str ) -> Any:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_UpperCamelCase : str = indices.reshape(shape[0] , -1 ) # add batch axis
_UpperCamelCase : str = self.unmap_to_all(__a )
_UpperCamelCase : int = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_UpperCamelCase : Optional[int] = self.embedding(__a )
if shape is not None:
_UpperCamelCase : Tuple = z_q.view(__a )
# reshape back to match original input shape
_UpperCamelCase : Tuple = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : List[str] , __a : Optional[Any]=False ) -> int:
_UpperCamelCase : Dict = parameters
_UpperCamelCase, _UpperCamelCase : str = torch.chunk(__a , 2 , dim=1 )
_UpperCamelCase : Tuple = torch.clamp(self.logvar , -30.0 , 20.0 )
_UpperCamelCase : Union[str, Any] = deterministic
_UpperCamelCase : Dict = torch.exp(0.5 * self.logvar )
_UpperCamelCase : Any = torch.exp(self.logvar )
if self.deterministic:
_UpperCamelCase : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
_UpperCamelCase : List[Any] = randn_tensor(
self.mean.shape , generator=__a , device=self.parameters.device , dtype=self.parameters.dtype )
_UpperCamelCase : List[Any] = self.mean + self.std * sample
return x
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[str]=None ) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Tuple , __a : List[str]=[1, 2, 3] ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
_UpperCamelCase : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
return self.mean
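# Reference identity for the diagonal-Gaussian KL term computed above
# (illustrative): KL(N(m1, s1^2) || N(m2, s2^2)) =
#   0.5 * ((m1 - m2)^2 / s2^2 + s1^2 / s2^2 - 1 - log(s1^2) + log(s2^2)),
# summed here over the non-batch dimensions [1, 2, 3].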
| 51
| 1
|
"""simple docstring"""
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase, _UpperCamelCase : Any = len(lowercase_ ), len(grid[0] )
if (
min(lowercase_ ,lowercase_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
_UpperCamelCase : Union[str, Any] = 0
count += depth_first_search(lowercase_ ,row + 1 ,lowercase_ ,lowercase_ )
count += depth_first_search(lowercase_ ,row - 1 ,lowercase_ ,lowercase_ )
count += depth_first_search(lowercase_ ,lowercase_ ,col + 1 ,lowercase_ )
count += depth_first_search(lowercase_ ,lowercase_ ,col - 1 ,lowercase_ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
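# Minimal usage sketch (illustrative; the name depth_first_search comes from
# the recursive call sites; 0 = open cell, 1 = blocked):
# grid = [[0, 0], [0, 0]]
# depth_first_search(grid, 0, 0, set())  # -> 2 simple paths to the bottom-right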
| 51
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"text": Value("string" )} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"summary": Value("string" )} )
SCREAMING_SNAKE_CASE__ :str = "text"
SCREAMING_SNAKE_CASE__ :str = "summary"
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
| 51
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["ConvNextFeatureExtractor"]
lowerCamelCase__ = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
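# Note: the structure above is the standard lazy-import pattern: each backend
# (vision / torch / tf) registers its names only when installed, TYPE_CHECKING
# keeps static analyzers happy, and _LazyModule defers the real imports until
# an attribute is first accessed.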
| 51
|
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = set()
# edges = list of graph's edges
_UpperCamelCase : Union[str, Any] = get_edges(lowercase_ )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
_UpperCamelCase, _UpperCamelCase : str = edges.pop()
chosen_vertices.add(lowercase_ )
chosen_vertices.add(lowercase_ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(lowercase_ )
return chosen_vertices
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : List[str] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
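# Note (illustrative): this is the classic matching-based 2-approximation; both
# endpoints of every popped edge join the cover, so the exact result (e.g.
# {0, 1, 2, 4} for the commented graph) depends on set iteration order.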
| 51
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = ["pixel_values"]
def __init__( self : int , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = True , **__a : str , ) -> None:
super().__init__(**__a )
_UpperCamelCase : Optional[Any] = size if size is not None else {"height": 384, "width": 384}
_UpperCamelCase : str = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : Optional[Any] = do_resize
_UpperCamelCase : List[str] = size
_UpperCamelCase : Dict = resample
_UpperCamelCase : List[str] = do_rescale
_UpperCamelCase : str = rescale_factor
_UpperCamelCase : Tuple = do_normalize
_UpperCamelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_UpperCamelCase : str = image_std if image_std is not None else OPENAI_CLIP_STD
_UpperCamelCase : str = do_convert_rgb
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> np.ndarray:
_UpperCamelCase : int = get_size_dict(__a , default_to_square=__a )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
_UpperCamelCase : str = (size["height"], size["width"])
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : np.ndarray , __a : Union[int, float] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[Any] , ) -> List[str]:
return rescale(__a , scale=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> np.ndarray:
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : ImageInput , __a : Optional[bool] = None , __a : Optional[Dict[str, int]] = None , __a : PILImageResampling = None , __a : Optional[bool] = None , __a : Optional[float] = None , __a : Optional[bool] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : bool = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : Tuple , ) -> PIL.Image.Image:
_UpperCamelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase : Dict = resample if resample is not None else self.resample
_UpperCamelCase : Dict = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase : int = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase : List[Any] = image_std if image_std is not None else self.image_std
_UpperCamelCase : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_UpperCamelCase : Union[str, Any] = size if size is not None else self.size
_UpperCamelCase : Union[str, Any] = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : List[Any] = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_UpperCamelCase : List[Any] = [convert_to_rgb(__a ) for image in images]
# All transformations expect numpy arrays.
_UpperCamelCase : List[str] = [to_numpy_array(__a ) for image in images]
if do_resize:
_UpperCamelCase : List[str] = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_rescale:
_UpperCamelCase : int = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
_UpperCamelCase : int = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
_UpperCamelCase : Union[str, Any] = [to_channel_dimension_format(__a , __a ) for image in images]
_UpperCamelCase : Union[str, Any] = BatchFeature(data={"pixel_values": images} , tensor_type=__a )
return encoded_outputs
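# Minimal usage sketch (illustrative; the concrete class name and the input
# image `pil_image` are assumptions, not part of the snippet):
# processor = BlipImageProcessor()
# batch = processor(images=pil_image, return_tensors="pt")
# batch["pixel_values"].shape  # -> (1, 3, 384, 384) with the defaults above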
| 51
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["OwlViTFeatureExtractor"]
lowerCamelCase__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : List[str] , __a : str , __a : Union[str, Any]=13 , __a : int=7 , __a : Optional[int]=True , __a : List[str]=True , __a : List[str]=False , __a : Optional[int]=True , __a : List[Any]=99 , __a : Union[str, Any]=32 , __a : Optional[int]=5 , __a : Union[str, Any]=4 , __a : List[str]=37 , __a : Optional[int]="gelu" , __a : Any=0.1 , __a : List[Any]=0.1 , __a : Union[str, Any]=512 , __a : Tuple=16 , __a : Union[str, Any]=2 , __a : List[Any]=0.02 , __a : Tuple=3 , __a : Dict=4 , __a : List[Any]=None , ) -> Any:
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Tuple = batch_size
_UpperCamelCase : Optional[int] = seq_length
_UpperCamelCase : Optional[Any] = is_training
_UpperCamelCase : int = use_input_mask
_UpperCamelCase : Any = use_token_type_ids
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : Optional[Any] = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : List[str] = intermediate_size
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : List[str] = max_position_embeddings
_UpperCamelCase : Optional[int] = type_vocab_size
_UpperCamelCase : Dict = type_sequence_label_size
_UpperCamelCase : Optional[Any] = initializer_range
_UpperCamelCase : Union[str, Any] = num_labels
_UpperCamelCase : Optional[Any] = num_choices
_UpperCamelCase : int = scope
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
_UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : Union[str, Any] = None
if self.use_input_mask:
_UpperCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase : Any = None
_UpperCamelCase : List[Any] = None
_UpperCamelCase : Tuple = None
if self.use_labels:
_UpperCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Any , __a : str , __a : int , __a : List[Any] , __a : int , __a : Any ) -> str:
_UpperCamelCase : Any = DistilBertModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Dict = model(__a , __a )
_UpperCamelCase : str = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : int , __a : Optional[Any] , __a : List[Any] , __a : List[str] , __a : Optional[Any] , __a : Any ) -> int:
_UpperCamelCase : List[Any] = DistilBertForMaskedLM(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Tuple , __a : Tuple , __a : int , __a : Dict , __a : Tuple , __a : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase : Union[str, Any] = DistilBertForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Union[str, Any] = model(
__a , attention_mask=__a , start_positions=__a , end_positions=__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : int , __a : Any , __a : Optional[int] , __a : Optional[int] , __a : int , __a : List[str] ) -> Optional[int]:
_UpperCamelCase : str = self.num_labels
_UpperCamelCase : List[str] = DistilBertForSequenceClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Optional[int] , __a : Any , __a : str , __a : Optional[Any] , __a : Tuple , __a : Dict ) -> str:
_UpperCamelCase : Optional[int] = self.num_labels
_UpperCamelCase : Dict = DistilBertForTokenClassification(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[int] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : List[Any] , __a : int , __a : Dict , __a : str , __a : Union[str, Any] , __a : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase : Tuple = self.num_choices
_UpperCamelCase : List[Any] = DistilBertForMultipleChoice(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase : int = model(
__a , attention_mask=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase : Any = self.prepare_config_and_inputs()
((_UpperCamelCase), (_UpperCamelCase), (_UpperCamelCase), (_UpperCamelCase), (_UpperCamelCase), (_UpperCamelCase)) : List[str] = config_and_inputs
_UpperCamelCase : Tuple = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
SCREAMING_SNAKE_CASE__ :Dict = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :List[str] = True
SCREAMING_SNAKE_CASE__ :Optional[int] = True
SCREAMING_SNAKE_CASE__ :Optional[Any] = True
SCREAMING_SNAKE_CASE__ :str = True
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
_UpperCamelCase : Tuple = DistilBertModelTester(self )
_UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , dim=37 )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
_UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__a )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Optional[int] = DistilBertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@slow
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
_UpperCamelCase, _UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
_UpperCamelCase : Tuple = True
_UpperCamelCase : Tuple = model_class(config=__a )
_UpperCamelCase : int = self._prepare_for_class(__a , __a )
_UpperCamelCase : str = torch.jit.trace(
__a , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__a , os.path.join(__a , "traced_model.pt" ) )
_UpperCamelCase : Optional[int] = torch.jit.load(os.path.join(__a , "traced_model.pt" ) , map_location=__a )
loaded(inputs_dict["input_ids"].to(__a ) , inputs_dict["attention_mask"].to(__a ) )
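        # The trace/save/load round-trip above is the standard TorchScript
        # portability check: a traced model can be serialized and re-executed
        # without the original Python class definition.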
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
_UpperCamelCase : List[Any] = DistilBertModel.from_pretrained("distilbert-base-uncased" )
_UpperCamelCase : int = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_UpperCamelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_UpperCamelCase : Any = model(__a , attention_mask=__a )[0]
_UpperCamelCase : str = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __a )
_UpperCamelCase : Optional[int] = torch.tensor(
[[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4 ) )
| 51
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowercase__ ( lowercase_ = 1_000_000 ,lowercase_ = 10 ) -> int:
"""simple docstring"""
_UpperCamelCase : defaultdict = defaultdict(lowercase_ )
for outer_width in range(3 ,(t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_UpperCamelCase : Any = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
else:
_UpperCamelCase : str = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(lowercase_ ,outer_width - 1 ,2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
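# Each laminae layout uses outer_width**2 - hole_width**2 tiles (hole centred,
# so both widths share the same parity); the answer counts tile totals
# t <= t_limit that are expressible in 1 to 10 distinct ways.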
| 51
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = DDIMPipeline
SCREAMING_SNAKE_CASE__ :Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
SCREAMING_SNAKE_CASE__ :List[str] = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
SCREAMING_SNAKE_CASE__ :Union[str, Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ :List[Any] = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCamelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
_UpperCamelCase : List[Any] = DDIMScheduler()
_UpperCamelCase : Any = {"unet": unet, "scheduler": scheduler}
return components
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Union[str, Any] , __a : int=0 ) -> Dict:
if str(__a ).startswith("mps" ):
_UpperCamelCase : List[str] = torch.manual_seed(__a )
else:
_UpperCamelCase : Tuple = torch.Generator(device=__a ).manual_seed(__a )
_UpperCamelCase : Tuple = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = "cpu"
_UpperCamelCase : Any = self.get_dummy_components()
_UpperCamelCase : Any = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
_UpperCamelCase : List[str] = self.get_dummy_inputs(__a )
_UpperCamelCase : Tuple = pipe(**__a ).images
_UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
_UpperCamelCase : List[Any] = np.array(
[1.0_0_0e0_0, 5.7_1_7e-0_1, 4.7_1_7e-0_1, 1.0_0_0e0_0, 0.0_0_0e0_0, 1.0_0_0e0_0, 3.0_0_0e-0_4, 0.0_0_0e0_0, 9.0_0_0e-0_4] )
_UpperCamelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1e-3 )
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
super().test_save_load_local(expected_max_difference=3e-3 )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
_UpperCamelCase : List[Any] = "google/ddpm-cifar10-32"
_UpperCamelCase : Tuple = UNetaDModel.from_pretrained(__a )
_UpperCamelCase : str = DDIMScheduler()
_UpperCamelCase : Union[str, Any] = DDIMPipeline(unet=__a , scheduler=__a )
ddim.to(__a )
ddim.set_progress_bar_config(disable=__a )
_UpperCamelCase : Dict = torch.manual_seed(0 )
_UpperCamelCase : Union[str, Any] = ddim(generator=__a , eta=0.0 , output_type="numpy" ).images
_UpperCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase : Any = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Any = "google/ddpm-ema-bedroom-256"
_UpperCamelCase : Optional[int] = UNetaDModel.from_pretrained(__a )
_UpperCamelCase : Optional[Any] = DDIMScheduler.from_pretrained(__a )
_UpperCamelCase : int = DDIMPipeline(unet=__a , scheduler=__a )
ddpm.to(__a )
ddpm.set_progress_bar_config(disable=__a )
_UpperCamelCase : int = torch.manual_seed(0 )
_UpperCamelCase : Tuple = ddpm(generator=__a , output_type="numpy" ).images
_UpperCamelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCamelCase : int = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
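# Note (illustrative): these fixed-seed slice checks are the standard diffusers
# regression pattern: with a seeded generator the sampler is deterministic, so
# a 3x3 corner of the output can be compared to stored values within tolerance.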
| 51
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("KEY")
lowerCamelCase__ = TypeVar("VAL")
@dataclass(frozen=_UpperCamelCase , slots=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( Generic[KEY, VAL] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :KEY
SCREAMING_SNAKE_CASE__ :VAL
class __SCREAMING_SNAKE_CASE ( _Item ):
'''simple docstring'''
def __init__( self : List[str] ) -> None:
        super().__init__(None , None )
def __bool__( self : Dict ) -> bool:
return False
lowerCamelCase__ = _DeletedItem()
class __SCREAMING_SNAKE_CASE ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self : int , __a : int = 8 , __a : float = 0.75 ) -> None:
_UpperCamelCase : str = initial_block_size
_UpperCamelCase : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_UpperCamelCase : List[str] = capacity_factor
_UpperCamelCase : Dict = 0
def __SCREAMING_SNAKE_CASE ( self : int , __a : KEY ) -> int:
return hash(__a ) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : int ) -> int:
return (ind + 1) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : int , __a : KEY , __a : VAL ) -> bool:
_UpperCamelCase : List[Any] = self._buckets[ind]
if not stored:
_UpperCamelCase : Tuple = _Item(__a , __a )
self._len += 1
return True
elif stored.key == key:
_UpperCamelCase : Union[str, Any] = _Item(__a , __a )
return True
else:
return False
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
_UpperCamelCase : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
_UpperCamelCase : List[str] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : int ) -> None:
_UpperCamelCase : Any = self._buckets
_UpperCamelCase : List[Any] = [None] * new_size
_UpperCamelCase : List[str] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __SCREAMING_SNAKE_CASE ( self : int ) -> None:
self._resize(len(self._buckets ) * 2 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None:
self._resize(len(self._buckets ) // 2 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : KEY ) -> Iterator[int]:
_UpperCamelCase : str = self._get_bucket_index(__a )
for _ in range(len(self._buckets ) ):
yield ind
_UpperCamelCase : Tuple = self._get_next_ind(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : KEY , __a : VAL ) -> None:
for ind in self._iterate_buckets(__a ):
if self._try_set(__a , __a , __a ):
break
def __setitem__( self : int , __a : KEY , __a : VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(__a , __a )
def __delitem__( self : str , __a : KEY ) -> None:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
raise KeyError(__a )
if item is _deleted:
continue
if item.key == key:
_UpperCamelCase : List[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , __a : KEY ) -> VAL:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__a )
def __len__( self : List[Any] ) -> int:
return self._len
def __iter__( self : List[str] ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self : List[str] ) -> str:
        _UpperCamelCase : Optional[int] = ", ".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
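# Minimal usage sketch of the open-addressing map above (illustrative; the
# un-obfuscated class name HashMap is taken from the __repr__ string):
# hm = HashMap(initial_block_size=8, capacity_factor=0.75)
# hm["a"] = 1
# hm["a"] = 2        # same key: the slot is overwritten in place
# del hm["a"]        # the slot is tombstoned with the _deleted sentinel
# len(hm)            # -> 0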
| 51
| 1
|
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( lowercase_ = 4 ) -> list[list[int]]:
"""simple docstring"""
_UpperCamelCase : Any = abs(lowercase_ ) or 4
return [[1 + x + y * row_size for x in range(lowercase_ )] for y in range(lowercase_ )]
def lowercase__ ( lowercase_ ) -> list[list[int]]:
"""simple docstring"""
return reverse_row(transpose(lowercase_ ) )
# OR.. transpose(reverse_column(matrix))
def lowercase__ ( lowercase_ ) -> list[list[int]]:
"""simple docstring"""
return reverse_row(reverse_column(lowercase_ ) )
# OR.. reverse_column(reverse_row(matrix))
def lowercase__ ( lowercase_ ) -> list[list[int]]:
"""simple docstring"""
return reverse_column(transpose(lowercase_ ) )
# OR.. transpose(reverse_row(matrix))
def lowercase__ ( lowercase_ ) -> list[list[int]]:
"""simple docstring"""
    _UpperCamelCase : List[Any] = [list(x ) for x in zip(*lowercase_ )]
return matrix
def lowercase__ ( lowercase_ ) -> list[list[int]]:
"""simple docstring"""
_UpperCamelCase : Tuple = matrix[::-1]
return matrix
def lowercase__ ( lowercase_ ) -> list[list[int]]:
"""simple docstring"""
_UpperCamelCase : List[str] = [x[::-1] for x in matrix]
return matrix
def lowercase__ ( lowercase_ ) -> None:
"""simple docstring"""
for i in matrix:
print(*lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 90 counterclockwise:\n")
print_matrix(rotate_aa(matrix))
lowerCamelCase__ = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 180:\n")
print_matrix(rotate_aaa(matrix))
lowerCamelCase__ = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 270 counterclockwise:\n")
print_matrix(rotate_aaa(matrix))
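# Worked 2x2 example of the 90-degree counterclockwise rule
# (reverse_row after transpose), illustrative:
# [[1, 2],   --transpose-->   [[1, 3],   --reverse_row-->   [[2, 4],
#  [3, 4]]                     [2, 4]]                       [1, 3]]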
| 51
|
"""simple docstring"""
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any] , __a : list[int] ) -> None:
_UpperCamelCase : Tuple = len(__a )
_UpperCamelCase : Dict = [0] * len_array
if len_array > 0:
_UpperCamelCase : Optional[Any] = array[0]
for i in range(1 , __a ):
_UpperCamelCase : Tuple = self.prefix_sum[i - 1] + array[i]
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : int , __a : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int ) -> bool:
_UpperCamelCase : int = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__a )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
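# Illustrative usage (class name PrefixSum assumed; methods correspond to
# get_sum(start, end) and contains_sum(target_sum) above):
# ps = PrefixSum([1, 2, 3, 4])   # prefix sums: [1, 3, 6, 10]
# ps.get_sum(1, 3)               # -> 10 - 1 = 9 (= 2 + 3 + 4)
# ps.contains_sum(6)             # -> True (the subarray [1, 2, 3] sums to 6)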
| 51
| 1
|
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> bool:
"""simple docstring"""
    _UpperCamelCase : str = [int(i ) for i in ip_va_address.split("." ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 255 for octet in octets )
if __name__ == "__main__":
lowerCamelCase__ = input().strip()
lowerCamelCase__ = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
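# e.g. "192.168.0.1" -> valid; "300.1.2.3" and "1.2.3" -> invalid (four octets,
# each in the range 0-255, are required).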
| 51
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def lowercase__ ( lowercase_ ,lowercase_=7 ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[int] = None
if token is not None:
_UpperCamelCase : Optional[Any] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
_UpperCamelCase : Any = "636036"
_UpperCamelCase : Tuple = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
_UpperCamelCase : Dict = requests.get(lowercase_ ,headers=lowercase_ ).json()
return result["workflow_runs"]
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_daily_ci_runs(lowercase_ )
_UpperCamelCase : Tuple = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
_UpperCamelCase : Union[str, Any] = workflow_run["id"]
break
return workflow_run_id
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : str = get_last_daily_ci_runs(lowercase_ )
if workflow_run_id is not None:
_UpperCamelCase : int = get_artifacts_links(worflow_run_id=lowercase_ ,token=lowercase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
_UpperCamelCase : Dict = artifacts_links[artifact_name]
download_artifact(
artifact_name=lowercase_ ,artifact_url=lowercase_ ,output_dir=lowercase_ ,token=lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
get_last_daily_ci_artifacts(lowercase_ ,lowercase_ ,lowercase_ )
_UpperCamelCase : Dict = {}
for artifact_name in artifact_names:
_UpperCamelCase : Union[str, Any] = os.path.join(lowercase_ ,F'''{artifact_name}.zip''' )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : int = {}
with zipfile.ZipFile(lowercase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase_ ):
# read the file
with z.open(lowercase_ ) as f:
_UpperCamelCase : int = f.read().decode("UTF-8" )
return results
| 51
| 1
|
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
lowerCamelCase__ = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
lowerCamelCase__ = logging.WARNING
def lowercase__ ( ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[Any] = os.getenv("DATASETS_VERBOSITY" ,lowercase_ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
F'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
return _default_log_level
def lowercase__ ( ) -> str:
"""simple docstring"""
return __name__.split("." )[0]
def lowercase__ ( ) -> logging.Logger:
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def lowercase__ ( ) -> None:
"""simple docstring"""
_UpperCamelCase : List[Any] = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def lowercase__ ( ) -> None:
"""simple docstring"""
_UpperCamelCase : List[Any] = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def lowercase__ ( lowercase_ = None ) -> logging.Logger:
"""simple docstring"""
if name is None:
_UpperCamelCase : int = _get_library_name()
return logging.getLogger(lowercase_ )
def lowercase__ ( ) -> int:
"""simple docstring"""
return _get_library_root_logger().getEffectiveLevel()
def lowercase__ ( lowercase_ ) -> None:
"""simple docstring"""
_get_library_root_logger().setLevel(lowercase_ )
def lowercase__ ( ) -> List[str]:
"""simple docstring"""
return set_verbosity(lowercase_ )
def lowercase__ ( ) -> Optional[Any]:
"""simple docstring"""
return set_verbosity(lowercase_ )
def lowercase__ ( ) -> List[str]:
"""simple docstring"""
return set_verbosity(lowercase_ )
def lowercase__ ( ) -> Optional[int]:
"""simple docstring"""
return set_verbosity(lowercase_ )
def lowercase__ ( ) -> None:
    """simple docstring"""
    _get_library_root_logger().propagate = False
def lowercase__ ( ) -> None:
    """simple docstring"""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any] , *__a : int , **__a : List[str] ) -> Tuple: # pylint: disable=unused-argument
_UpperCamelCase : Union[str, Any] = args[0] if args else None
def __iter__( self : str ) -> List[str]:
return iter(self._iterator )
def __getattr__( self : Any , __a : str ) -> Union[str, Any]:
def empty_fn(*__a : Optional[int] , **__a : Tuple ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : str ) -> Dict:
return self
def __exit__( self : int , __a : Tuple , __a : Dict , __a : Dict ) -> Optional[Any]:
return
lowerCamelCase__ = True
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __call__( self : Optional[Any] , *__a : int , __a : Optional[Any]=False , **__a : int ) -> List[Any]:
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*__a , **__a )
else:
return EmptyTqdm(*__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , *__a : List[str] , **__a : Any ) -> Optional[Any]:
_UpperCamelCase : Union[str, Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
lowerCamelCase__ = _tqdm_cls()
def lowercase__ ( ) -> bool:
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def lowercase__ ( ) -> str:
"""simple docstring"""
global _tqdm_active
_UpperCamelCase : Union[str, Any] = True
def lowercase__ ( ) -> str:
"""simple docstring"""
global _tqdm_active
_UpperCamelCase : Tuple = False
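# Typical use (illustrative): the set_verbosity_* wrappers above move the
# library root logger between DEBUG/INFO/WARNING/ERROR, while
# enable_progress_bar()/disable_progress_bar() toggle the module-level
# _tqdm_active flag consumed by the tqdm wrapper class.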
| 51
|
"""simple docstring"""
import math
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Any , __a : list[list[float]] , __a : list[int] ) -> int:
        _UpperCamelCase : List[Any] = 0.0
        _UpperCamelCase : Union[str, Any] = 0.0
        # Accumulate the squared Euclidean distance to each of the two weight
        # vectors in distinct accumulators, then compare after the full loop.
        for i in range(len(__a ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if da > db else 1
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : list[list[int | float]] , __a : list[int] , __a : int , __a : float ) -> list[list[int | float]]:
for i in range(len(__a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def lowercase__ ( ) -> None:
"""simple docstring"""
_UpperCamelCase : Optional[int] = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_UpperCamelCase : List[str] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_UpperCamelCase : List[Any] = SelfOrganizingMap()
_UpperCamelCase : int = 3
_UpperCamelCase : List[Any] = 0.5
for _ in range(lowercase_ ):
for j in range(len(lowercase_ ) ):
# training sample
_UpperCamelCase : int = training_samples[j]
# Compute the winning vector
_UpperCamelCase : Tuple = self_organizing_map.get_winner(lowercase_ ,lowercase_ )
# Update the winning vector
_UpperCamelCase : int = self_organizing_map.update(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
# classify test sample
_UpperCamelCase : Optional[int] = [0, 0, 0, 1]
_UpperCamelCase : Dict = self_organizing_map.get_winner(lowercase_ ,lowercase_ )
# results
print(F'''Clusters that the test sample belongs to : {winner}''' )
print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
| 51
| 1
|
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> bool:
"""simple docstring"""
if num < 0:
return False
_UpperCamelCase : int = num
_UpperCamelCase : int = 0
while num > 0:
_UpperCamelCase : Optional[Any] = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
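# Illustrative traces: 121 -> reversed digits 121 -> True; 123 -> 321 -> False;
# negative numbers are rejected up front.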
| 51
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase__ = "src/transformers"
lowerCamelCase__ = "docs/source/en"
lowerCamelCase__ = "."
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
with open(lowercase_ ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
_UpperCamelCase : Union[str, Any] = f.readlines()
# Find the start prompt.
_UpperCamelCase : Dict = 0
while not lines[start_index].startswith(lowercase_ ):
start_index += 1
start_index += 1
_UpperCamelCase : Optional[int] = start_index
while not lines[end_index].startswith(lowercase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
lowerCamelCase__ = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase__ = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
lowerCamelCase__ = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Tuple = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" ,lowercase_ )
return [m.group(0 ) for m in matches]
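# e.g. camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]: the pattern
# splits before a capital that follows a lowercase letter, and before the last
# capital of an acronym run.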
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = 2 if text == "✅" or text == "❌" else len(lowercase_ )
_UpperCamelCase : Union[str, Any] = (width - text_length) // 2
_UpperCamelCase : Dict = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_UpperCamelCase : str = {
        name: config_mapping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_mapping_names
}
_UpperCamelCase : Dict = {name: config.replace("Config" ,"" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : str = collections.defaultdict(lowercase_ )
# Let's lookup through all transformers object (once).
for attr_name in dir(lowercase_ ):
_UpperCamelCase : List[str] = None
if attr_name.endswith("Tokenizer" ):
_UpperCamelCase : Tuple = slow_tokenizers
_UpperCamelCase : Any = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
_UpperCamelCase : Optional[Any] = fast_tokenizers
_UpperCamelCase : List[str] = attr_name[:-13]
elif _re_tf_models.match(lowercase_ ) is not None:
_UpperCamelCase : List[Any] = tf_models
_UpperCamelCase : Dict = _re_tf_models.match(lowercase_ ).groups()[0]
elif _re_flax_models.match(lowercase_ ) is not None:
_UpperCamelCase : Dict = flax_models
_UpperCamelCase : Union[str, Any] = _re_flax_models.match(lowercase_ ).groups()[0]
elif _re_pt_models.match(lowercase_ ) is not None:
_UpperCamelCase : Optional[int] = pt_models
_UpperCamelCase : Any = _re_pt_models.match(lowercase_ ).groups()[0]
if lookup_dict is not None:
while len(lowercase_ ) > 0:
if attr_name in model_name_to_prefix.values():
_UpperCamelCase : Dict = True
break
# Try again after removing the last word in the name
_UpperCamelCase : List[str] = "".join(camel_case_split(lowercase_ )[:-1] )
# Let's build that table!
_UpperCamelCase : Any = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_UpperCamelCase : List[str] = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_UpperCamelCase : Union[str, Any] = [len(lowercase_ ) + 2 for c in columns]
_UpperCamelCase : Any = max([len(lowercase_ ) for name in model_names] ) + 2
# Build the table per se
_UpperCamelCase : Tuple = "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for c, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
_UpperCamelCase : Union[str, Any] = {True: "✅", False: "❌"}
for name in model_names:
_UpperCamelCase : Optional[int] = model_name_to_prefix[name]
_UpperCamelCase : Tuple = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for l, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
return table
def lowercase__ ( lowercase_=False ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : str = _find_text_in_file(
filename=os.path.join(lowercase_ ,"index.md" ) ,start_prompt="<!--This table is updated automatically from the auto modules" ,end_prompt="<!-- End table-->" ,)
_UpperCamelCase : Any = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowercase_ ,"index.md" ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCamelCase__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Union[str, Any] , __a : Tuple=13 , __a : str=7 , __a : Tuple=True , __a : Tuple=True , __a : int=False , __a : Optional[Any]=True , __a : Union[str, Any]=99 , __a : Tuple=32 , __a : List[Any]=5 , __a : Dict=4 , __a : Tuple=37 , __a : Tuple="gelu" , __a : Union[str, Any]=0.1 , __a : Dict=0.1 , __a : Optional[Any]=512 , __a : Tuple=16 , __a : Tuple=2 , __a : str=0.02 , __a : Optional[Any]=3 , __a : Optional[Any]=4 , __a : Tuple=None , ) -> List[str]:
_UpperCamelCase : Optional[Any] = parent
_UpperCamelCase : str = batch_size
_UpperCamelCase : Any = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Optional[Any] = use_input_mask
_UpperCamelCase : Optional[int] = use_token_type_ids
_UpperCamelCase : Optional[Any] = use_labels
_UpperCamelCase : Tuple = vocab_size
_UpperCamelCase : str = hidden_size
_UpperCamelCase : int = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Tuple = intermediate_size
_UpperCamelCase : Dict = hidden_act
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : List[str] = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = max_position_embeddings
_UpperCamelCase : List[Any] = type_vocab_size
_UpperCamelCase : Any = type_sequence_label_size
_UpperCamelCase : Union[str, Any] = initializer_range
_UpperCamelCase : List[Any] = num_labels
_UpperCamelCase : List[str] = num_choices
_UpperCamelCase : Any = scope
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
_UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : Union[str, Any] = None
if self.use_input_mask:
_UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase : int = None
if self.use_token_type_ids:
_UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : Tuple = None
if self.use_labels:
_UpperCamelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , use_stable_embedding=__a , )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Tuple , __a : List[str] , __a : List[str] , __a : str , __a : Union[str, Any] , __a : Optional[Any] , __a : Any ) -> List[str]:
_UpperCamelCase : List[Any] = OpenLlamaModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Dict = model(__a , attention_mask=__a )
_UpperCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[str] , __a : int , __a : Optional[int] , __a : Any , __a : str , __a : Optional[Any] , __a : str , ) -> List[Any]:
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : Dict = OpenLlamaModel(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Dict = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , )
_UpperCamelCase : List[Any] = model(
__a , attention_mask=__a , encoder_hidden_states=__a , )
_UpperCamelCase : List[Any] = model(__a , attention_mask=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : int , __a : List[Any] , __a : List[str] , __a : Union[str, Any] , __a : str , __a : Dict , __a : str , __a : List[Any] , __a : Optional[int] , ) -> List[str]:
_UpperCamelCase : List[str] = OpenLlamaForCausalLM(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Any = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Dict , __a : Optional[int] , __a : Any , __a : Optional[Any] , __a : Optional[Any] , __a : Optional[int] , __a : List[str] , __a : int , __a : int , ) -> List[Any]:
_UpperCamelCase : str = True
_UpperCamelCase : int = True
_UpperCamelCase : Optional[Any] = OpenLlamaForCausalLM(config=__a )
model.to(__a )
model.eval()
# first forward pass
_UpperCamelCase : Dict = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , use_cache=__a , )
_UpperCamelCase : List[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids accordingly
_UpperCamelCase : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCamelCase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
_UpperCamelCase : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCamelCase : str = torch.cat([input_mask, next_mask] , dim=-1 )
_UpperCamelCase : Tuple = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_hidden_states=__a , )["hidden_states"][0]
_UpperCamelCase : List[Any] = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , past_key_values=__a , output_hidden_states=__a , )["hidden_states"][0]
# select random slice
_UpperCamelCase : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCamelCase : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE__ :Dict = (OpenLlamaForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ :int = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Dict = False
SCREAMING_SNAKE_CASE__ :List[str] = False
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
_UpperCamelCase : List[Any] = OpenLlamaModelTester(self )
_UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCamelCase : Any = type
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
_UpperCamelCase, _UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[int] = 3
_UpperCamelCase : Any = input_dict["input_ids"]
_UpperCamelCase : Dict = input_ids.ne(1 ).to(__a )
_UpperCamelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_UpperCamelCase : List[str] = OpenLlamaForSequenceClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[str] = model(__a , attention_mask=__a , labels=__a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
_UpperCamelCase, _UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[Any] = 3
_UpperCamelCase : Optional[Any] = "single_label_classification"
_UpperCamelCase : Any = input_dict["input_ids"]
_UpperCamelCase : List[str] = input_ids.ne(1 ).to(__a )
_UpperCamelCase : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_UpperCamelCase : Dict = OpenLlamaForSequenceClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : int = model(__a , attention_mask=__a , labels=__a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
_UpperCamelCase, _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Tuple = 3
_UpperCamelCase : Tuple = "multi_label_classification"
_UpperCamelCase : List[Any] = input_dict["input_ids"]
_UpperCamelCase : Tuple = input_ids.ne(1 ).to(__a )
_UpperCamelCase : Dict = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_UpperCamelCase : int = OpenLlamaForSequenceClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Any = model(__a , attention_mask=__a , labels=__a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : List[Any] ) -> str:
_UpperCamelCase, _UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[int] = ids_tensor([1, 10] , config.vocab_size )
_UpperCamelCase : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_UpperCamelCase : List[Any] = OpenLlamaModel(__a )
original_model.to(__a )
original_model.eval()
_UpperCamelCase : List[Any] = original_model(__a ).last_hidden_state
_UpperCamelCase : Dict = original_model(__a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_UpperCamelCase : Optional[int] = {"type": scaling_type, "factor": 10.0}
_UpperCamelCase : Optional[Any] = OpenLlamaModel(__a )
scaled_model.to(__a )
scaled_model.eval()
_UpperCamelCase : Union[str, Any] = scaled_model(__a ).last_hidden_state
_UpperCamelCase : str = scaled_model(__a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__a , __a , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__a , __a , atol=1e-5 ) )
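# Note (added for context, not part of the original test): "linear" RoPE scaling
# divides position ids by the factor at every step, so even short inputs diverge
# from the unscaled model, while "dynamic" NTK scaling only kicks in past the
# original max_position_embeddings, which is why the short-input outputs above
# are expected to match only in the dynamic case.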
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase__ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, float]:
"""simple docstring"""
_UpperCamelCase : str = len([g for position, g in enumerate(lowercase_ ) if g == main_target[position]] )
return (item, float(lowercase_ ))
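# e.g. with main_target "abd", evaluate("abc", "abd") returns ("abc", 2.0):
# the score counts the positions where the candidate gene matches the target.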
def crossover(parent_a: str, parent_b: str) -> tuple[str, str]:
    """simple docstring"""
    random_slice = random.randint(0, len(parent_a) - 1)
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
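# e.g. with parents "ABCD" and "WXYZ" and random_slice == 2, the children are
# "ABYZ" and "WXCD" (single-point crossover around the slice index).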
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : int = list(lowercase_ )
if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
_UpperCamelCase : int = random.choice(lowercase_ )
return "".join(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,) -> list[str]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = []
# Generate more children proportionally to the fitness score.
_UpperCamelCase : List[str] = int(parent_a[1] * 100 ) + 1
_UpperCamelCase : Union[str, Any] = 10 if child_n >= 10 else child_n
for _ in range(lowercase_ ):
_UpperCamelCase : Dict = population_score[random.randint(0 ,lowercase_ )][0]
_UpperCamelCase, _UpperCamelCase : Dict = crossover(parent_a[0] ,lowercase_ )
# Append new string to the population list.
pop.append(mutate(lowercase_ ,lowercase_ ) )
pop.append(mutate(lowercase_ ,lowercase_ ) )
return pop
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = True ) -> tuple[int, int, str]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
_UpperCamelCase : List[str] = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(lowercase_ )
    # Verify that the target contains no genes besides the ones in the genes list.
_UpperCamelCase : int = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_UpperCamelCase : int = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(lowercase_ )
# Generate random starting population.
_UpperCamelCase : Union[str, Any] = []
for _ in range(lowercase_ ):
population.append("".join([random.choice(lowercase_ ) for i in range(len(lowercase_ ) )] ) )
    # Just some logs to know what the algorithm is doing.
_UpperCamelCase, _UpperCamelCase : str = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowercase_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_UpperCamelCase : int = [evaluate(lowercase_ ,lowercase_ ) for item in population]
# Check if there is a matching evolution.
_UpperCamelCase : Optional[Any] = sorted(lowercase_ ,key=lambda lowercase_ : x[1] ,reverse=lowercase_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to show that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regression of the evolution.
_UpperCamelCase : str = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowercase_ )
# Normalize population score to be between 0 and 1.
_UpperCamelCase : str = [
(item, score / len(lowercase_ )) for item, score in population_score
]
# This is selection
for i in range(lowercase_ ):
population.extend(select(population_score[int(lowercase_ )] ,lowercase_ ,lowercase_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(lowercase_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
"""simple docstring"""
import torch
from transformers import AutoModel
class __SCREAMING_SNAKE_CASE ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , __a : List[Any]="sayef/fsner-bert-base-uncased" ) -> List[Any]:
        super().__init__()
_UpperCamelCase : Union[str, Any] = AutoModel.from_pretrained(__a , return_dict=__a )
_UpperCamelCase : Optional[Any] = torch.nn.CosineSimilarity(3 , 1e-0_8 )
_UpperCamelCase : Optional[Any] = torch.nn.Softmax(dim=1 )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , **__a : Optional[int] ) -> Optional[int]:
return self.bert(**__a ).last_hidden_state
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Any ) -> Union[str, Any]:
return token_embeddings.sum(2 , keepdim=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[Any] , __a : List[Any] , __a : int=1 ) -> str:
return self.softmax(T * self.cos(__a , __a ) )
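    # The forward pass below scores every query token against the start/end
    # support tokens of each class; reading from the shapes (an interpretation,
    # not stated in the original), p_starts[i, t] is the probability that token
    # t of query i opens an entity span and p_ends[i, t] that it closes one.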
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Optional[int] , __a : Dict ) -> Optional[int]:
_UpperCamelCase : str = W_supports["sizes"].tolist()
_UpperCamelCase : Union[str, Any] = W_supports["start_token_id"].item()
_UpperCamelCase : Tuple = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_UpperCamelCase : List[str] = self.BERT(**__a )
_UpperCamelCase : int = self.BERT(**__a )
_UpperCamelCase : Dict = None
_UpperCamelCase : int = None
_UpperCamelCase : Tuple = W_supports["input_ids"] == start_token_id
_UpperCamelCase : Optional[Any] = W_supports["input_ids"] == end_token_id
for i, size in enumerate(__a ):
if i == 0:
_UpperCamelCase : Dict = 0
else:
_UpperCamelCase : Tuple = support_sizes[i - 1]
_UpperCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]]
_UpperCamelCase : str = S[s : s + size][end_token_masks[s : s + size]]
_UpperCamelCase : Optional[int] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_UpperCamelCase : str = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_UpperCamelCase : Any = torch.vstack((p_starts, p_start) )
_UpperCamelCase : Union[str, Any] = torch.vstack((p_ends, p_end) )
else:
_UpperCamelCase : List[str] = p_start
_UpperCamelCase : Dict = p_end
return p_starts, p_ends
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = ["model.decoder.embed_positions.weights"]
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
if "emb" in name:
_UpperCamelCase : List[str] = name.replace("emb" ,"model.decoder.embed_tokens" )
if "transformer" in name:
_UpperCamelCase : Optional[int] = name.replace("transformer" ,"model.decoder" )
if "cross_attention" in name:
_UpperCamelCase : Optional[int] = name.replace("cross_attention" ,"encoder_attn" )
if "linear1" in name:
_UpperCamelCase : Optional[Any] = name.replace("linear1" ,"fc1" )
if "linear2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("linear2" ,"fc2" )
if "norm1" in name:
_UpperCamelCase : Optional[Any] = name.replace("norm1" ,"self_attn_layer_norm" )
if "norm_cross" in name:
_UpperCamelCase : Dict = name.replace("norm_cross" ,"encoder_attn_layer_norm" )
if "norm2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("norm2" ,"final_layer_norm" )
if "out_norm" in name:
_UpperCamelCase : Union[str, Any] = name.replace("out_norm" ,"model.decoder.layer_norm" )
if "linears" in name:
_UpperCamelCase : List[str] = name.replace("linears" ,"lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
_UpperCamelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" ,"enc_to_dec_proj" )
return name
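# Example mappings produced by the substitutions above:
#   "emb.weight"                        -> "model.decoder.embed_tokens.weight"
#   "transformer.layers.0.linear1.bias" -> "model.decoder.layers.0.fc1.bias"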
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple[Dict, Dict]:
"""simple docstring"""
_UpperCamelCase : str = list(state_dict.keys() )
_UpperCamelCase : Optional[Any] = {}
for key in keys:
_UpperCamelCase : Optional[int] = state_dict.pop(lowercase_ )
_UpperCamelCase : List[Any] = rename_keys(lowercase_ )
if "in_proj_weight" in key:
# split fused qkv proj
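            # in_proj_weight stacks q/k/v as a (3 * hidden_size, hidden_size)
            # matrix: rows [0:h) are q_proj, [h:2h) are k_proj, [-h:) are v_proj.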
_UpperCamelCase : Tuple = val[:hidden_size, :]
_UpperCamelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
_UpperCamelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_UpperCamelCase : Optional[Any] = val
else:
_UpperCamelCase : List[str] = val
return state_dict, enc_dec_proj_state_dict
def lowercase__ ( lowercase_ ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
_UpperCamelCase : List[Any] = 1_024
_UpperCamelCase : List[str] = 24
_UpperCamelCase : Any = 16
elif checkpoint == "medium":
_UpperCamelCase : Tuple = 1_536
_UpperCamelCase : Dict = 48
_UpperCamelCase : Tuple = 24
elif checkpoint == "large":
_UpperCamelCase : int = 2_048
_UpperCamelCase : Optional[int] = 48
_UpperCamelCase : Dict = 32
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
_UpperCamelCase : str = MusicgenDecoderConfig(
hidden_size=lowercase_ ,ffn_dim=hidden_size * 4 ,num_hidden_layers=lowercase_ ,num_attention_heads=lowercase_ ,)
return config
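# e.g. decoder_config_from_checkpoint("small") yields hidden_size=1024,
# num_hidden_layers=24 and num_attention_heads=16, with ffn_dim = 4 * hidden_size.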
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_="cpu" ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : str = MusicGen.get_pretrained(lowercase_ ,device=lowercase_ )
_UpperCamelCase : Union[str, Any] = decoder_config_from_checkpoint(lowercase_ )
_UpperCamelCase : Optional[int] = fairseq_model.lm.state_dict()
_UpperCamelCase, _UpperCamelCase : Optional[Any] = rename_state_dict(
lowercase_ ,hidden_size=decoder_config.hidden_size )
_UpperCamelCase : Tuple = TaEncoderModel.from_pretrained("t5-base" )
_UpperCamelCase : Union[str, Any] = EncodecModel.from_pretrained("facebook/encodec_32khz" )
_UpperCamelCase : str = MusicgenForCausalLM(lowercase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_UpperCamelCase, _UpperCamelCase : str = decoder.load_state_dict(lowercase_ ,strict=lowercase_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase_ )
if len(lowercase_ ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(lowercase_ ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
_UpperCamelCase : str = MusicgenForConditionalGeneration(text_encoder=lowercase_ ,audio_encoder=lowercase_ ,decoder=lowercase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase_ )
# check we can do a forward pass
_UpperCamelCase : List[str] = torch.arange(0 ,8 ,dtype=torch.long ).reshape(2 ,-1 )
_UpperCamelCase : Dict = input_ids.reshape(2 * 4 ,-1 )
with torch.no_grad():
_UpperCamelCase : Tuple = model(input_ids=lowercase_ ,decoder_input_ids=lowercase_ ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
_UpperCamelCase : int = AutoTokenizer.from_pretrained("t5-base" )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" ,padding_side="left" )
_UpperCamelCase : Optional[int] = MusicgenProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
# set the appropriate bos/pad token ids
_UpperCamelCase : str = 2_048
_UpperCamelCase : str = 2_048
# set other default generation config params
_UpperCamelCase : Optional[Any] = int(30 * audio_encoder.config.frame_rate )
_UpperCamelCase : List[str] = True
_UpperCamelCase : int = 3.0
if pytorch_dump_folder is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(lowercase_ )
processor.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowerCamelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_UpperCamelCase : Tuple = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : str = TFAutoModel.from_pretrained(__a , from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : List[str] = AutoModel.from_pretrained(__a , from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_UpperCamelCase : Dict = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : Optional[int] = TFAutoModelForPreTraining.from_pretrained(__a , from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : Dict = AutoModelForPreTraining.from_pretrained(__a , from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Any = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(__a , from_pt=__a )
_UpperCamelCase, _UpperCamelCase : int = TFAutoModelForCausalLM.from_pretrained(
__a , output_loading_info=__a , from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained(__a , from_tf=__a )
_UpperCamelCase, _UpperCamelCase : str = AutoModelForCausalLM.from_pretrained(
__a , output_loading_info=__a , from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : int ) -> Any:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : str = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(__a , from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : List[str] = AutoModelWithLMHead.from_pretrained(__a , from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Optional[Any] = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(__a , from_pt=__a )
_UpperCamelCase, _UpperCamelCase : List[str] = TFAutoModelForMaskedLM.from_pretrained(
__a , output_loading_info=__a , from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : List[Any] = AutoModelForMaskedLM.from_pretrained(__a , from_tf=__a )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
__a , output_loading_info=__a , from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : int = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : int = TFAutoModelForSeqaSeqLM.from_pretrained(__a , from_pt=__a )
_UpperCamelCase, _UpperCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
__a , output_loading_info=__a , from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : Tuple = AutoModelForSeqaSeqLM.from_pretrained(__a , from_tf=__a )
_UpperCamelCase, _UpperCamelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(
__a , output_loading_info=__a , from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_UpperCamelCase : List[Any] = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : str = TFAutoModelForSequenceClassification.from_pretrained(__a , from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(__a , from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_UpperCamelCase : int = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : List[Any] = TFAutoModelForQuestionAnswering.from_pretrained(__a , from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : str = AutoModelForQuestionAnswering.from_pretrained(__a , from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : List[str] = TFAutoModelWithLMHead.from_pretrained(__a , from_pt=__a )
self.assertIsInstance(__a , __a )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=__a ) , 1_4410 )
_UpperCamelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(__a , from_tf=__a )
self.assertIsInstance(__a , __a )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=__a ) , 1_4410 )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Optional[int] = TFAutoModelWithLMHead.from_pretrained(__a , from_pt=__a )
self.assertIsInstance(__a , __a )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=__a ) , 1_4410 )
_UpperCamelCase : Any = AutoModelWithLMHead.from_pretrained(__a , from_tf=__a )
self.assertIsInstance(__a , __a )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=__a ) , 1_4410 )
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
lowerCamelCase__ = input("Enter image url: ").strip()
print(f"""Downloading image from {url} ...""")
lowerCamelCase__ = BeautifulSoup(requests.get(url).content, "html.parser")
# The image URL is in the content field of the first meta tag with property og:image
lowerCamelCase__ = soup.find("meta", {"property": "og:image"})["content"]
lowerCamelCase__ = requests.get(image_url).content
lowerCamelCase__ = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : str , __a : Union[str, Any]=13 , __a : str=32 , __a : Any=3 , __a : List[str]=4 , __a : int=[10, 20, 30, 40] , __a : int=[2, 2, 3, 2] , __a : Union[str, Any]=True , __a : str=True , __a : Dict=37 , __a : Tuple="gelu" , __a : List[str]=10 , __a : List[str]=0.02 , __a : List[str]=["stage2", "stage3", "stage4"] , __a : Any=[2, 3, 4] , __a : List[Any]=None , ) -> Dict:
_UpperCamelCase : str = parent
_UpperCamelCase : Union[str, Any] = batch_size
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : Any = num_channels
_UpperCamelCase : List[Any] = num_stages
_UpperCamelCase : str = hidden_sizes
_UpperCamelCase : Union[str, Any] = depths
_UpperCamelCase : Union[str, Any] = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : Optional[Any] = hidden_act
_UpperCamelCase : List[str] = num_labels
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : List[Any] = out_features
_UpperCamelCase : Any = out_indices
_UpperCamelCase : Any = scope
def __SCREAMING_SNAKE_CASE ( self : str ) -> Any:
_UpperCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : str = None
if self.use_labels:
_UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
_UpperCamelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[Any] , __a : Union[str, Any] , __a : Any ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = ConvNextVaModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Union[str, Any] = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : List[Any] , __a : Tuple , __a : str ) -> Optional[int]:
_UpperCamelCase : Tuple = ConvNextVaForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[int] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[str] , __a : List[Any] , __a : Tuple ) -> Tuple:
_UpperCamelCase : Union[str, Any] = ConvNextVaBackbone(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[str] = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_UpperCamelCase : Dict = None
_UpperCamelCase : str = ConvNextVaBackbone(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Union[str, Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : int = self.prepare_config_and_inputs()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : int = config_and_inputs
_UpperCamelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
_UpperCamelCase : List[Any] = self.prepare_config_and_inputs()
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : int = config_and_inputs
_UpperCamelCase : Optional[int] = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ :List[Any] = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Dict = False
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :Dict = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = False
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase : List[Any] = ConvNextVaModelTester(self )
_UpperCamelCase : Dict = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
pass
def __SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_UpperCamelCase, _UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
_UpperCamelCase : Any = True
if model_class.__name__ in [
*get_values(__a ),
*get_values(__a ),
]:
continue
_UpperCamelCase : Tuple = model_class(__a )
model.to(__a )
model.train()
_UpperCamelCase : List[str] = self._prepare_for_class(__a , __a , return_labels=__a )
_UpperCamelCase : Optional[Any] = model(**__a ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_UpperCamelCase, _UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
_UpperCamelCase : Dict = False
_UpperCamelCase : Optional[int] = True
if (
model_class.__name__
in [*get_values(__a ), *get_values(__a )]
or not model_class.supports_gradient_checkpointing
):
continue
_UpperCamelCase : List[Any] = model_class(__a )
model.to(__a )
model.gradient_checkpointing_enable()
model.train()
_UpperCamelCase : Optional[int] = self._prepare_for_class(__a , __a , return_labels=__a )
_UpperCamelCase : Optional[int] = model(**__a ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
_UpperCamelCase, _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Optional[int] = model_class(__a )
_UpperCamelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Optional[Any] = [*signature.parameters.keys()]
_UpperCamelCase : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
def check_hidden_states_output(__a : Optional[Any] , __a : Optional[Any] , __a : List[Any] ):
_UpperCamelCase : int = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_UpperCamelCase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
_UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase, _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : Optional[int] = True
check_hidden_states_output(__a , __a , __a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Any = ConvNextVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowercase__ ( ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : Tuple = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__a )
_UpperCamelCase : List[Any] = self.default_image_processor
_UpperCamelCase : Optional[Any] = prepare_img()
_UpperCamelCase : List[Any] = preprocessor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : List[str] = model(**__a )
# verify the logits
_UpperCamelCase : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
_UpperCamelCase : Union[str, Any] = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Any = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
F'''{test_file} instead.''' )
_UpperCamelCase : str = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
_UpperCamelCase : Dict = components[:-1] + [test_fn.replace(".py" ,"" )]
_UpperCamelCase : List[str] = ".".join(lowercase_ )
return test_module_path
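# e.g. "tests/models/bert/test_modeling_bert.py" -> "tests.models.bert.test_modeling_bert"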
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = get_module_path(lowercase_ )
_UpperCamelCase : str = importlib.import_module(lowercase_ )
return test_module
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : List[Any] = get_test_module(lowercase_ )
for attr in dir(lowercase_ ):
if attr.endswith("ModelTester" ):
tester_classes.append(getattr(lowercase_ ,lowercase_ ) )
# sort with class names
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : Any = get_test_module(lowercase_ )
for attr in dir(lowercase_ ):
_UpperCamelCase : int = getattr(lowercase_ ,lowercase_ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
_UpperCamelCase : Optional[Any] = getattr(lowercase_ ,"all_model_classes" ,[] )
if len(lowercase_ ) > 0:
test_classes.append(lowercase_ )
# sort with class names
    return sorted(lowercase_ ,key=lambda x : x.__name__ )
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Dict = get_test_classes(lowercase_ )
_UpperCamelCase : int = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
    return sorted(lowercase_ ,key=lambda x : x.__name__ )
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = test_class()
if hasattr(lowercase_ ,"setUp" ):
test.setUp()
_UpperCamelCase : Tuple = None
if hasattr(lowercase_ ,"model_tester" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
_UpperCamelCase : Tuple = test.model_tester.__class__
return model_tester
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = get_test_classes(lowercase_ )
_UpperCamelCase : Dict = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(lowercase_ )
# sort with class names
    return sorted(lowercase_ ,key=lambda x : x.__name__ )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Any = get_test_classes_for_model(lowercase_ ,lowercase_ )
_UpperCamelCase : List[Any] = []
for test_class in test_classes:
_UpperCamelCase : List[Any] = get_model_tester_from_test_class(lowercase_ )
if tester_class is not None:
tester_classes.append(lowercase_ )
# sort with class names
    return sorted(lowercase_ ,key=lambda x : x.__name__ )
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Any = get_test_classes(lowercase_ )
    _UpperCamelCase : Tuple = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
return test_tester_mapping
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_model_classes(lowercase_ )
_UpperCamelCase : Optional[int] = {
        model_class: get_test_classes_for_model(lowercase_ ,model_class ) for model_class in model_classes
}
return model_test_mapping
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = get_model_classes(lowercase_ )
_UpperCamelCase : Tuple = {
        model_class: get_tester_classes_for_model(lowercase_ ,model_class ) for model_class in model_classes
}
return model_to_tester_mapping
def lowercase__ ( lowercase_ ) -> Optional[int]:
    """Recursively turn classes and containers of classes into JSON-serializable names."""
    if isinstance(lowercase_ ,str ):
        return lowercase_
    elif isinstance(lowercase_ ,type ):
        return lowercase_.__name__
    elif isinstance(lowercase_ ,(list, tuple) ):
        return [to_json(x ) for x in lowercase_]
    elif isinstance(lowercase_ ,dict ):
        return {to_json(k ): to_json(v ) for k, v in lowercase_.items()}
    else:
        return lowercase_
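# Hedged usage sketch (not part of the original module; the test-file path below is
# illustrative): the helpers above can be chained to inspect which tester class backs
# each model class in a given test file.
#
#   test_file = "tests/models/bert/test_modeling_bert.py"
#   for model_class, testers in get_model_to_tester_mapping(test_file ).items():
#       print(to_json(model_class ) ,"->" ,to_json(testers ) )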
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def lowercase__ ( method ) -> Optional[Any]:
"""simple docstring"""
if not is_accelerate_available():
return method
_UpperCamelCase : str = version.parse(accelerate.__version__ ).base_version
if version.parse(lowercase_ ) < version.parse("0.17.0" ):
return method
def wrapper(self ,*lowercase_ ,**lowercase_ ):
if hasattr(self ,"_hf_hook" ) and hasattr(self._hf_hook ,"pre_forward" ):
self._hf_hook.pre_forward(self )
return method(self ,*lowercase_ ,**lowercase_ )
return wrapper
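# Hedged usage sketch (illustrative; in the unobfuscated diffusers source this decorator
# is likely `apply_forward_hook`): it is applied to non-`forward` entry points of an
# `nn.Module` so that, on accelerate < 0.17.0, the offload pre-forward hook still runs.
#
#   class TinyEncoder(torch.nn.Module):
#       @lowercase__
#       def encode(self ,x ):
#           return x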
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
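# Hedged note (illustrative): with the `_LazyModule` pattern above, a submodule is only
# imported the first time one of its symbols is accessed, e.g.
#
#   from transformers.models.mask2former import Mask2FormerConfig  # triggers the real import
#
# so environments without the torch or vision extras can still import the package
# namespace as long as only the available symbols are touched.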
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = StableUnCLIPPipeline
SCREAMING_SNAKE_CASE__ :Union[str, Any] = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ :List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ :Any = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ :str = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
SCREAMING_SNAKE_CASE__ :Union[str, Any] = False
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
_UpperCamelCase : int = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__a , projection_dim=__a , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase : str = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__a , num_layers=1 , )
torch.manual_seed(0 )
_UpperCamelCase : List[Any] = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1000 , clip_sample=__a , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
_UpperCamelCase : str = StableUnCLIPImageNormalizer(embedding_dim=__a )
_UpperCamelCase : Optional[Any] = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
_UpperCamelCase : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
_UpperCamelCase : int = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__a , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase : Union[str, Any] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__a , layers_per_block=1 , upcast_attention=__a , use_linear_projection=__a , )
torch.manual_seed(0 )
_UpperCamelCase : Optional[Any] = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=__a , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCamelCase : Union[str, Any] = AutoencoderKL()
_UpperCamelCase : List[str] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : str , __a : List[str]=0 ) -> Tuple:
if str(__a ).startswith("mps" ):
_UpperCamelCase : Optional[Any] = torch.manual_seed(__a )
else:
_UpperCamelCase : List[str] = torch.Generator(device=__a ).manual_seed(__a )
_UpperCamelCase : List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
_UpperCamelCase : Optional[int] = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=__a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
_UpperCamelCase : Optional[Any] = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=__a )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self : int ) -> Any:
_UpperCamelCase : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
_UpperCamelCase : Tuple = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCamelCase : Optional[Any] = pipe("anime turle" , generator=__a , output_type="np" )
_UpperCamelCase : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase : Optional[Any] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
_UpperCamelCase : Optional[int] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase : str = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
_UpperCamelCase : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
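# Hedged sketch (not part of the test file; names illustrative): the peak-VRAM
# measurement pattern used by the test above, in isolation.
#
#   torch.cuda.empty_cache()
#   torch.cuda.reset_peak_memory_stats()
#   pipe.enable_attention_slicing()
#   pipe.enable_sequential_cpu_offload()
#   _ = pipe("anime turtle" ,num_inference_steps=2 ,output_type="np" )
#   peak_bytes = torch.cuda.max_memory_allocated()  # compare against a budget, e.g. 7 GB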
"""simple docstring"""
lowerCamelCase__ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
"""simple docstring"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
lowerCamelCase__ = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase__ = logging.getLogger()
def lowercase__ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument("-f" )
_UpperCamelCase : Tuple = parser.parse_args()
return args.f
def lowercase__ ( lowercase_ ,lowercase_="eval" ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : Dict = os.path.join(lowercase_ ,F'''{split}_results.json''' )
if os.path.exists(lowercase_ ):
with open(lowercase_ ,"r" ) as f:
return json.load(lowercase_ )
raise ValueError(F'''can\'t find {path}''' )
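# Hedged usage sketch (illustrative): after a run writes `{output_dir}/eval_results.json`,
# the helper above returns the parsed metrics, e.g.
#
#   metrics = get_results("/tmp/flax-glue" )                # reads /tmp/flax-glue/eval_results.json
#   metrics = get_results("/tmp/flax-sum" ,split="test" )   # reads test_results.json instead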
lowerCamelCase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : str ) -> Any:
_UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
_UpperCamelCase : Tuple = F'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(__a , "argv" , __a ):
run_flax_glue.main()
_UpperCamelCase : Tuple = get_results(__a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
_UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
_UpperCamelCase : List[str] = F'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(__a , "argv" , __a ):
run_clm_flax.main()
_UpperCamelCase : Tuple = get_results(__a )
self.assertLess(result["eval_perplexity"] , 100 )
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
_UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir()
_UpperCamelCase : Any = F'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(__a , "argv" , __a ):
run_summarization_flax.main()
_UpperCamelCase : Tuple = get_results(__a , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 10 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Any:
_UpperCamelCase : int = self.get_auto_remove_tmp_dir()
_UpperCamelCase : Optional[Any] = F'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(__a , "argv" , __a ):
run_mlm_flax.main()
_UpperCamelCase : List[str] = get_results(__a )
self.assertLess(result["eval_perplexity"] , 42 )
@slow
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
_UpperCamelCase : str = self.get_auto_remove_tmp_dir()
_UpperCamelCase : Any = F'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(__a , "argv" , __a ):
run_ta_mlm_flax.main()
_UpperCamelCase : Optional[Any] = get_results(__a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
_UpperCamelCase : Tuple = 7 if get_gpu_count() > 1 else 2
_UpperCamelCase : int = self.get_auto_remove_tmp_dir()
_UpperCamelCase : Optional[int] = F'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(__a , "argv" , __a ):
run_flax_ner.main()
_UpperCamelCase : List[str] = get_results(__a )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = self.get_auto_remove_tmp_dir()
_UpperCamelCase : List[str] = F'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(__a , "argv" , __a ):
run_qa.main()
_UpperCamelCase : List[Any] = get_results(__a )
self.assertGreaterEqual(result["eval_f1"] , 30 )
self.assertGreaterEqual(result["eval_exact"] , 30 )
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
_UpperCamelCase : Tuple = tempfile.mkdtemp()
_UpperCamelCase : str = 5
# Realm tok
_UpperCamelCase : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
_UpperCamelCase : Optional[Any] = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase : Optional[Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
_UpperCamelCase : Any = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
_UpperCamelCase : int = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
] , dtype=__a , )
return block_records
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
_UpperCamelCase : List[str] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
_UpperCamelCase : Tuple = self.get_config()
_UpperCamelCase : int = self.get_dummy_retriever()
_UpperCamelCase : Tuple = retriever.tokenizer
_UpperCamelCase : List[str] = np.array([0, 3] , dtype="long" )
_UpperCamelCase : Union[str, Any] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : List[str] = tokenizer(
["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : str = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase : Any = self.get_config()
_UpperCamelCase : Dict = self.get_dummy_retriever()
_UpperCamelCase : Dict = retriever.tokenizer
_UpperCamelCase : List[Any] = np.array([0, 3, 5] , dtype="long" )
_UpperCamelCase : Optional[int] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : str = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : Union[str, Any] = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual([False, True, True] , __a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
_UpperCamelCase : int = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
_UpperCamelCase : List[Any] = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
_UpperCamelCase : int = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
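# Hedged sketch (illustrative): the save/load round trip exercised by the test above.
#
#   retriever.save_pretrained("/tmp/realm_block_records" )   # writes _REALM_BLOCK_RECORDS_FILENAME
#   reloaded = RealmRetriever.from_pretrained("/tmp/realm_block_records" )
#   reloaded.block_records[0]  # -> b"This is the first record"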
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = "convnextv2"
def __init__( self : List[Any] , __a : Optional[int]=3 , __a : str=4 , __a : Tuple=4 , __a : Dict=None , __a : Union[str, Any]=None , __a : List[Any]="gelu" , __a : Tuple=0.02 , __a : List[str]=1e-1_2 , __a : Optional[int]=0.0 , __a : int=224 , __a : List[Any]=None , __a : Tuple=None , **__a : Tuple , ) -> Optional[int]:
super().__init__(**__a )
_UpperCamelCase : int = num_channels
_UpperCamelCase : int = patch_size
_UpperCamelCase : int = num_stages
_UpperCamelCase : Tuple = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
_UpperCamelCase : Any = [3, 3, 9, 3] if depths is None else depths
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : List[Any] = drop_path_rate
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : Optional[int] = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
_UpperCamelCase, _UpperCamelCase : str = get_aligned_output_features_output_indices(
out_features=__a , out_indices=__a , stage_names=self.stage_names )
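# Hedged usage sketch (illustrative; the class above is `ConvNextVaConfig` in this dump's
# naming, `ConvNextV2Config` upstream): instantiating it with defaults reproduces the
# ConvNeXt-Tiny layout encoded in __init__.
#
#   config = ConvNextVaConfig()     # hidden_sizes -> [96, 192, 384, 768], depths -> [3, 3, 9, 3]
#   config.stage_names              # -> ["stem", "stage1", "stage2", "stage3", "stage4"]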
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = LEDConfig
SCREAMING_SNAKE_CASE__ :str = {}
SCREAMING_SNAKE_CASE__ :List[str] = "gelu"
def __init__( self : List[Any] , __a : Union[str, Any] , __a : List[Any]=13 , __a : int=7 , __a : str=True , __a : Any=False , __a : str=99 , __a : str=32 , __a : Union[str, Any]=2 , __a : Optional[Any]=4 , __a : List[Any]=37 , __a : List[Any]=0.1 , __a : Tuple=0.1 , __a : Dict=20 , __a : str=2 , __a : Dict=1 , __a : Any=0 , __a : List[Any]=4 , ) -> List[Any]:
_UpperCamelCase : Optional[Any] = parent
_UpperCamelCase : List[str] = batch_size
_UpperCamelCase : str = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : int = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : str = max_position_embeddings
_UpperCamelCase : int = eos_token_id
_UpperCamelCase : Dict = pad_token_id
_UpperCamelCase : Optional[Any] = bos_token_id
_UpperCamelCase : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_UpperCamelCase : List[str] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_UpperCamelCase : int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
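        # Worked example of the padding arithmetic above (illustrative): with the default
        # seq_length=7 and attention_window=4, 7 % 4 == 3, so (4 - 3) % 4 == 1 pad position
        # is added and encoder_seq_length == 8, the next multiple of the window size.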
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCamelCase : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCamelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_UpperCamelCase : Dict = prepare_led_inputs_dict(__a , __a , __a )
_UpperCamelCase : Union[str, Any] = tf.concat(
[tf.zeros_like(__a )[:, :-1], tf.ones_like(__a )[:, -1:]] , axis=-1 , )
_UpperCamelCase : Union[str, Any] = global_attention_mask
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : int ) -> Tuple:
_UpperCamelCase : Tuple = TFLEDModel(config=__a ).get_decoder()
_UpperCamelCase : Tuple = inputs_dict["input_ids"]
_UpperCamelCase : int = input_ids[:1, :]
_UpperCamelCase : List[str] = inputs_dict["attention_mask"][:1, :]
_UpperCamelCase : List[Any] = 1
# first forward pass
_UpperCamelCase : Any = model(__a , attention_mask=__a , use_cache=__a )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCamelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCamelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_UpperCamelCase : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCamelCase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCamelCase : Tuple = model(__a , attention_mask=__a )[0]
_UpperCamelCase : int = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCamelCase : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=None ,lowercase_=None ,) -> Dict:
"""simple docstring"""
if attention_mask is None:
_UpperCamelCase : str = tf.cast(tf.math.not_equal(lowercase_ ,config.pad_token_id ) ,tf.inta )
if decoder_attention_mask is None:
_UpperCamelCase : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ),
] ,axis=-1 ,)
if head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
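# Hedged note (illustrative): when masks are omitted, the helper above derives them from
# the config, e.g. `attention_mask` is 1 wherever `input_ids != config.pad_token_id`, the
# decoder mask always attends to the first (decoder start) position, and the head masks
# default to all-ones tensors of shape (num_layers, num_attention_heads).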
@require_tf
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Tuple = True
SCREAMING_SNAKE_CASE__ :str = False
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
_UpperCamelCase : int = TFLEDModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase, _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[int] = tf.zeros_like(inputs_dict["attention_mask"] )
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : str = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_UpperCamelCase : Dict = True
_UpperCamelCase : str = self.model_tester.seq_length
_UpperCamelCase : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__a : Optional[int] ):
_UpperCamelCase : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__a : Optional[Any] ):
_UpperCamelCase : Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions]
_UpperCamelCase : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = True
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Any = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
_UpperCamelCase : Optional[Any] = model_class(__a )
_UpperCamelCase : List[Any] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCamelCase : int = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : str = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
_UpperCamelCase : Any = True
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
        # TODO: Head-masking is not yet implemented
pass
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
return tf.constant(lowercase_ ,dtype=tf.intaa )
lowerCamelCase__ = 1E-4
@slow
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Any = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_UpperCamelCase : int = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Tuple = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Optional[int] = model(**__a )[0]
_UpperCamelCase : Optional[int] = (1, 1024, 768)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Tuple = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_UpperCamelCase : Optional[int] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : List[str] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Union[str, Any] = model(**__a )[0]
_UpperCamelCase : int = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Optional[int] = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 , rtol=1e-3 )
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''Binary tree node that stores `data` coins.'''

    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
lowerCamelCase__ = namedtuple("CoinsDistribResult", "moves excess")
def lowercase__ ( root ) -> int:
    """Return the minimum number of moves needed to leave exactly one coin on every node."""
    if root is None:
        return 0
    # Validation
    def count_nodes(node ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(node ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(root ) != count_coins(root ):
        raise ValueError("The nodes number should be same as the number of coins" )
    # Main calculation
    def get_distrib(node ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 ,1 )
        left_distrib_moves, left_distrib_excess = get_distrib(node.left )
        right_distrib_moves, right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        coins_distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        coins_distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(coins_distrib_moves ,coins_distrib_excess )

    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
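# Hedged usage sketch (illustrative; the entry point is named `distribute_coins` in the
# unobfuscated source): three nodes holding 3, 0 and 0 coins need two moves, one along
# each edge out of the root.
#
#   root = TreeNode(3 ,TreeNode(0 ) ,TreeNode(0 ) )
#   lowercase__(root )  # -> 2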
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = RoCBertTokenizer
SCREAMING_SNAKE_CASE__ :Dict = None
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = True
SCREAMING_SNAKE_CASE__ :Union[str, Any] = filter_non_english
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
super().setUp()
_UpperCamelCase : Any = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
_UpperCamelCase : List[str] = {}
_UpperCamelCase : Tuple = {}
for i, value in enumerate(__a ):
_UpperCamelCase : List[str] = i
_UpperCamelCase : Optional[Any] = i
_UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
_UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(__a , __a , ensure_ascii=__a )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(__a , __a , ensure_ascii=__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : int = tokenizer.tokenize("你好[SEP]你是谁" )
self.assertListEqual(__a , ["你", "好", "[SEP]", "你", "是", "谁"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__a ) , [5, 6, 2, 5, 7, 8] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
_UpperCamelCase : Dict = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
_UpperCamelCase : List[Any] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
_UpperCamelCase : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Dict = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : int = RoCBertBasicTokenizer(do_lower_case=__a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_UpperCamelCase : Any = {}
for i, token in enumerate(__a ):
_UpperCamelCase : str = i
_UpperCamelCase : Optional[int] = RoCBertWordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
_UpperCamelCase : Tuple = self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Union[str, Any] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_UpperCamelCase : Optional[Any] = tokenizer_r.encode_plus(
__a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , )
_UpperCamelCase : List[Any] = tokenizer_r.do_lower_case if hasattr(__a , "do_lower_case" ) else False
_UpperCamelCase : Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = ["的", "人", "有"]
_UpperCamelCase : int = "".join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : int = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Any = False
_UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Any = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCamelCase : Any = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : Optional[int] = tokenizer.encode("你好" , add_special_tokens=__a )
_UpperCamelCase : Dict = tokenizer.encode("你是谁" , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__a )
_UpperCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Optional[Any] = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : int = "你好,你是谁"
_UpperCamelCase : Any = tokenizer.tokenize(__a )
_UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__a )
_UpperCamelCase : List[str] = tokenizer.convert_tokens_to_shape_ids(__a )
_UpperCamelCase : Any = tokenizer.convert_tokens_to_pronunciation_ids(__a )
_UpperCamelCase : Optional[int] = tokenizer.prepare_for_model(
__a , __a , __a , add_special_tokens=__a )
_UpperCamelCase : Tuple = tokenizer.encode_plus(__a , add_special_tokens=__a )
self.assertEqual(__a , __a )