| code (string, 86–54.5k chars) | code_codestyle (int64, 0–371) | style_context (string, 87–49.2k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
def longest_common_subsequence(x: str, y: str):
    """
    Finds the longest common subsequence of two strings and returns its length
    together with one such subsequence.
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    dp = [[0] * (n + 1) for _ in range(m + 1)]

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            dp[i][j] = max(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1] + match)

    # backtrack through the table to recover one longest common subsequence
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if dp[i][j] == dp[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif dp[i][j] == dp[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return dp[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    assert ln == expected_ln
    assert subseq == expected_subseq
    print("len =", ln, ", sub-sequence =", subseq)

    import doctest

    doctest.testmod()
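# A couple of quick sanity checks for longest_common_subsequence (inputs chosen
# for illustration; they are not part of the original file):
#
#     longest_common_subsequence("abcde", "ace")  # -> (3, "ace")
#     longest_common_subsequence("abc", "def")    # -> (0, "")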
| code_codestyle: 12 |
# Lint as: python3
"""Utilities for dataset and split naming."""
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
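# Illustrative examples for the helpers above (values chosen for demonstration):
#
#     camelcase_to_snakecase("CamelCaseName")      # -> "camel_case_name"
#     snakecase_to_camelcase("camel_case_name")    # -> "CamelCaseName"
#     filename_prefix_for_split("squad", "train")  # -> "squad-train"
#     filenames_for_dataset_split("/data", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 100])
#     # -> ["/data/squad-train-00000-of-00002.arrow", "/data/squad-train-00001-of-00002.arrow"]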
| style_context_codestyle: 311 | label: 0 |
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the old checkpoint's weights into the new model structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        old_attribute = ""
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # the old attention projections are fused in a single in_proj matrix; split it in three
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # descend one level into both models and try the next attribute
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute != "":
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
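# Example invocation (paths and script name are illustrative):
#
#     python convert_prophetnet_checkpoint.py \
#         --prophetnet_checkpoint_path /path/to/old_checkpoint \
#         --pytorch_dump_folder_path /path/to/output_dir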
| code_codestyle: 105 |
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| style_context_codestyle: 105 | label: 1 |
"""Image processor class for ConvNeXT."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    """Constructs a ConvNeXT-style image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
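# A minimal usage sketch (illustrative: the class name follows the reconstruction
# above and "sample.jpg" is a placeholder file):
#
#     from PIL import Image
#     processor = ConvNextImageProcessor(size={"shortest_edge": 224})
#     batch = processor(images=Image.open("sample.jpg"), return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 224, 224) for an RGB input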
| code_codestyle: 22 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
| style_context_codestyle: 298 | label: 0 |
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput


BITS = 8


def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor ranging from 0 to 1, outputs a bit tensor ranging from -1 to 1."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """Expects a bit tensor ranging from -1 to 1, outputs an image tensor ranging from 0 to 1."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
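# A minimal round-trip check of the two helpers above (illustrative; not part of
# the original pipeline file). Encoding to bits and back reproduces the input up
# to the 1/255 quantization applied in decimal_to_bits.
def _bit_roundtrip_demo():
    x = torch.rand(1, 3, 4, 4)  # image tensor in [0, 1]
    bits = decimal_to_bits(x)  # shape (1, 3 * 8, 4, 4), values in {-1.0, +1.0}
    x_rec = bits_to_decimal(bits)  # back to [0, 1]
    assert torch.allclose(x_rec, (x * 255).int().float() / 255)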
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """Predict the sample at the previous timestep by reversing the SDE, clamping
    the predicted original sample to +/- self.bit_scale."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding

    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """Predict the sample at the previous timestep by reversing the SDE, clamping
    the predicted original sample to +/- self.bit_scale."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()

        self.bit_scale = bit_scale
        # patch the scheduler with the bit-clamping step that matches its type,
        # binding it so scheduler.step(...) receives the scheduler as `self`
        step_fn = ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        scheduler.step = step_fn.__get__(scheduler)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| code_codestyle: 359 |
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        """Simple constructor that receives a key or uses the default key = 0."""
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypt 'content' with 'key' (or the constructor's key, or 1) and
        return the result as a list of chars."""
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        """Decrypt 'content' with 'key' (or the constructor's key, or 1) and
        return the result as a list of chars."""
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """Encrypt 'content' with 'key' and return the result as a string."""
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Decrypt 'content' with 'key' and return the result as a string."""
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypt the contents of 'file' into 'encrypt.out'; returns True on success."""
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypt the contents of 'file' into 'decrypt.out'; returns True on success."""
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| style_context_codestyle: 264 | label: 0 |
"""Utility that updates the metadata of the Transformers library in the dataset `huggingface/transformers-metadata`."""
import argparse
import collections
import os
import re
import tempfile

import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
    ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
    ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
    ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
    ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
    ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
    ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
    ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
    ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
    ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
    (
        "zero-shot-object-detection",
        "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
        "AutoModelForZeroShotObjectDetection",
    ),
    ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
    ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
    ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
    ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
    (
        "table-question-answering",
        "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForTableQuestionAnswering",
    ),
    ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
    ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
    (
        "next-sentence-prediction",
        "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
        "AutoModelForNextSentencePrediction",
    ),
    (
        "audio-frame-classification",
        "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForAudioFrameClassification",
    ),
    ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
    (
        "document-question-answering",
        "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForDocumentQuestionAnswering",
    ),
    (
        "visual-question-answering",
        "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForVisualQuestionAnswering",
    ),
    ("image-to-text", "MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
    (
        "zero-shot-image-classification",
        "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForZeroShotImageClassification",
    ),
    ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
    ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
    ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier: str) -> list:
    """Split a camel-cased name into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
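# For example (illustrative): camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]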
def get_frameworks_table() -> pd.DataFrame:
    """Generates a dataframe containing the supported backends for each model type."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)


def update_pipeline_and_auto_class_table(table):
    """Update the table mapping model classes to (pipeline tag, auto class)."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table


def update_metadata(token, commit_sha):
    """Update the metadata for the Transformers repo."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )


def check_pipeline_tags():
    """Check that all pipeline tags are present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
| code_codestyle: 304 |
from dataclasses import dataclass
from typing import Tuple

import numpy as np
import torch


@dataclass
class DifferentiableProjectiveCamera:
    """Implements a batch, differentiable, standard pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """:return: coords of shape (width * height, 2)"""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # map pixel coordinates to [-1, 1] and scale by the field of view
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
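# An illustrative shape check for create_pan_cameras (not part of the original
# file): 20 pan angles, size x size pixels, and an (origin, direction) pair per ray.
def _pan_camera_demo():
    cam = create_pan_cameras(64)
    assert cam.camera_rays.shape == (1, 20 * 64 * 64, 2, 3)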
| style_context_codestyle: 304 | label: 1 |
"""simple docstring"""
def A_ ( ) -> Union[str, Any]:
UpperCamelCase : List[str] = []
UpperCamelCase : Any = 1
while len(lowercase__ ) < 1e6:
constant.append(str(lowercase__ ) )
i += 1
UpperCamelCase : List[str] = "".join(lowercase__ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
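# For reference: the selected digits of the constant are 1, 1, 5, 3, 7, 2 and 1,
# so solution() returns 210 (the known answer to Project Euler problem 40).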
| code_codestyle: 363 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """Stops generation once the number of generated tokens reaches `max_length`."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stops generation once `start_length + max_new_tokens` tokens were generated."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stops generation once `max_time` seconds have elapsed."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
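# A minimal usage sketch (illustrative; not part of the original module): a
# StoppingCriteriaList stops generation as soon as any of its criteria is met.
def _stopping_criteria_demo():
    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
    input_ids = torch.zeros((1, 21), dtype=torch.long)
    scores = torch.zeros((1, 100))
    assert criteria(input_ids, scores)  # 21 tokens >= max_length of 20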
| style_context_codestyle: 140 | label: 0 |
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class __UpperCamelCase ( a__ ):
lowerCamelCase : List[str] ="""char"""
lowerCamelCase : Optional[Any] ="""bpe"""
lowerCamelCase : int ="""wp"""
a : Optional[int] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class __UpperCamelCase ( a__ ):
lowerCamelCase : Any =["""image_processor""", """char_tokenizer"""]
lowerCamelCase : int ="""ViTImageProcessor"""
lowerCamelCase : Any ="""MgpstrTokenizer"""
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Optional[Any]:
a : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCAmelCase__ , )
a : Optional[int] = kwargs.pop("feature_extractor" )
a : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
a : Tuple = tokenizer
a : int = AutoTokenizer.from_pretrained("gpt2" )
a : Tuple = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def __call__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Optional[int]:
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
a : Dict = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if text is not None:
a : Tuple = self.char_tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if text is None:
return inputs
elif images is None:
return encodings
else:
a : Tuple = encodings["input_ids"]
return inputs
def __a ( self , lowerCAmelCase__ ) -> Dict:
a, a, a : str = sequences
a : int = char_preds.size(0 )
a, a : Union[str, Any] = self._decode_helper(lowerCAmelCase__ , "char" )
a, a : Dict = self._decode_helper(lowerCAmelCase__ , "bpe" )
a, a : Any = self._decode_helper(lowerCAmelCase__ , "wp" )
a : Dict = []
a : int = []
for i in range(lowerCAmelCase__ ):
a : Union[str, Any] = [char_scores[i], bpe_scores[i], wp_scores[i]]
a : Optional[int] = [char_strs[i], bpe_strs[i], wp_strs[i]]
a : str = scores.index(max(lowerCAmelCase__ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
a : List[str] = {}
a : int = final_strs
a : List[str] = final_scores
a : List[Any] = char_strs
a : Tuple = bpe_strs
a : Any = wp_strs
return out
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
if format == DecodeType.CHARACTER:
a : Optional[int] = self.char_decode
a : Optional[int] = 1
a : Optional[int] = "[s]"
elif format == DecodeType.BPE:
a : Any = self.bpe_decode
a : Union[str, Any] = 2
a : str = "#"
elif format == DecodeType.WORDPIECE:
a : Dict = self.wp_decode
a : str = 102
a : Optional[Any] = "[SEP]"
else:
raise ValueError(f"""Format {format} is not supported.""" )
a, a : List[Any] = [], []
a : Optional[int] = pred_logits.size(0 )
a : List[Any] = pred_logits.size(1 )
a, a : List[Any] = pred_logits.topk(1 , dim=-1 , largest=lowerCAmelCase__ , sorted=lowerCAmelCase__ )
a : List[str] = preds_index.view(-1 , lowerCAmelCase__ )[:, 1:]
a : Dict = decoder(lowerCAmelCase__ )
a, a : Dict = torch.nn.functional.softmax(lowerCAmelCase__ , dim=2 ).max(dim=2 )
a : List[Any] = preds_max_prob[:, 1:]
for index in range(lowerCAmelCase__ ):
a : Optional[int] = preds_str[index].find(lowerCAmelCase__ )
a : List[Any] = preds_str[index][:pred_eos]
a : Optional[int] = preds_index[index].cpu().tolist()
a : Any = pred_index.index(lowerCAmelCase__ ) if eos_token in pred_index else -1
a : Dict = preds_max_prob[index][: pred_eos_index + 1]
a : List[str] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(lowerCAmelCase__ )
conf_scores.append(lowerCAmelCase__ )
return dec_strs, conf_scores
def __a ( self , lowerCAmelCase__ ) -> str:
a : List[Any] = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(lowerCAmelCase__ )]
return decode_strs
def __a ( self , lowerCAmelCase__ ) -> str:
return self.bpe_tokenizer.batch_decode(lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ ) -> Dict:
a : Optional[int] = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(lowerCAmelCase__ )]
return decode_strs
| code_codestyle: 105 |
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.
PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")
@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class TestImportMechanisms(unittest.TestCase ):
    def test_module_spec_available( self ):
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class GenericUtilTests(unittest.TestCase ):
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def test_context_managers_no_context( self , mock_stdout ):
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def test_context_managers_one_context( self , mock_stdout ):
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def test_context_managers_two_context( self , mock_stdout ):
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])
        # find_labels detects the framework through inheritance, so subclasses work too
        class DummyModel(BertForSequenceClassification):
            pass
        self.assertEqual(find_labels(DummyModel), ["labels"])
    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])
        class DummyModel(TFBertForSequenceClassification):
            pass
        self.assertEqual(find_labels(DummyModel), ["labels"])
    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])
        class DummyModel(FlaxBertForSequenceClassification):
            pass
        self.assertEqual(find_labels(DummyModel), [])
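# A sketch of why the expectations above hold (an assumption for illustration, not
# asserted by this test): `find_labels` inspects the model's forward/call
# signature, so `BertForPreTraining.forward`, which accepts both `labels` and
# `next_sentence_label`, yields the two-element list checked above.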
| 265
| 0
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    """A node of the linked list, holding a value and a reference to the next node."""
    data: int
    next_node: Node | None
class SortedLinkedList:
    """A singly linked list that stores any iterable of ints in ascending order."""
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)
    def __iter__(self) -> Iterator[int]:
        # prepending in descending order leaves the list in ascending order
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__(self) -> int:
        return sum(1 for _ in self)
    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
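# Illustrative usage (hypothetical values, not the module's test data): since
# __init__ sorts descending and prepends each node, iteration yields ascending
# order, so print(merge_lists(SortedLinkedList([1, 3]), SortedLinkedList([2, 4])))
# prints "1 -> 2 -> 3 -> 4".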
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 117
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_vit'] = ['ViTFeatureExtractor']
    _import_structure['image_processing_vit'] = ['ViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit'] = [
        'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTForImageClassification',
        'ViTForMaskedImageModeling',
        'ViTModel',
        'ViTPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit'] = [
        'TFViTForImageClassification',
        'TFViTModel',
        'TFViTPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vit'] = [
        'FlaxViTForImageClassification',
        'FlaxViTModel',
        'FlaxViTPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
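# Sketch of the effect (not part of the original file): `_LazyModule` exposes the
# names listed in `_import_structure` but defers the heavy framework imports, so
# `from transformers.models.vit import ViTModel` only loads `modeling_vit` when
# the attribute is first resolved.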
| 117
| 1
|
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)
    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ], device=torch_device, )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
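# Worked example of the ratio bound above (illustrative numbers): with TOLERANCE
# = 1e-3, an expected value of 1e8 may deviate by up to ~1e5 in absolute terms
# and still pass, whereas an additive atol check would have to be that loose for
# every element, including the O(1) ones.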
| 61
|
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple('from_to', 'from_ to')
METRIC_CONVERSION = {
    'cubicmeter': from_to(1, 1),
    'litre': from_to(0.001, 1000),
    'kilolitre': from_to(1, 1),
    'gallon': from_to(0.00454, 264.172),
    'cubicyard': from_to(0.76455, 1.30795),
    'cubicfoot': from_to(0.028, 35.3147),
    'cup': from_to(0.000236588, 4226.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
f"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
+ ", ".join(__lowerCamelCase ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ ", ".join(__lowerCamelCase ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
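# Illustrative check derived from the table above: 4 cubic metres to litres is
# volume_conversion(4, "cubicmeter", "litre") == 4 * 1 * 1000 == 4000.0.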
if __name__ == "__main__":
import doctest
doctest.testmod()
| 61
| 1
|
def nand_gate(input_1: int, input_2: int) -> int:
    """Output 0 only when both inputs are 1 (logical NOT-AND)."""
    return int((input_1, input_2).count(0) != 0)
def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 357
|
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float, ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
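# Illustrative check (hypothetical values) of the mass-action law n * p = n_i**2:
# carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
# returns ('intrinsic_conc', 50.0), since (25 * 100) ** 0.5 == 50.0.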
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    task_type: Optional[str] = field(
        default='NER', metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} )
    labels: Optional[str] = field(
        default=None, metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , )
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}")
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
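    # Note (illustrative): seqeval's metrics expect the gold tag sequences first and
    # the predictions second, which is why out_label_list precedes preds_list above.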
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
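# Illustrative invocation (paths and model name are placeholders, not part of this
# script):
#   python run_ner.py --model_name_or_path bert-base-cased --data_dir ./conll2003 \
#       --labels ./conll2003/labels.txt --output_dir ./out --do_train --do_eval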
| 302
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (('num_inference_steps', 50),)
    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
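    # Note (illustrative): IPNDM is a fourth-order linear multistep method, which is
    # why the tests seed `scheduler.ets` with four past residuals before stepping.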
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)
    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
| 302
| 1
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
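# Note (illustrative): pinning only a three-logit slice with atol=1e-4 is the
# usual integration-test compromise between exercising real pretrained weights
# and tolerating framework-level numeric drift.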
| 340
|
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
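# Illustrative identity behind the loop above: with the cosine alpha_bar(t),
# beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), so the cumulative product
# prod_{j<=i} (1 - beta_j) tracks alpha_bar(t_{i+1}) up to the max_beta clamp.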
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, variance_type: str = "fixed_small_log", clip_sample: bool = True, clip_sample_range: Optional[float] = 1.0, prediction_type: str = "epsilon", beta_schedule: str = "squaredcos_cap_v2", ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")
        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, prev_timestep: Optional[int] = None, generator=None, return_dict: bool = True, ):
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
                " for the UnCLIPScheduler.")
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range)
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device)
            variance = self._get_variance(
                t, predicted_variance=predicted_variance, prev_timestep=prev_timestep, )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
                    " for the UnCLIPScheduler.")
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise( self ,original_samples: torch.FloatTensor ,noise: torch.FloatTensor ,timesteps: torch.IntTensor ,):
        '''simple docstring'''
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype )
        timesteps = timesteps.to(original_samples.device )
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1 )
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
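    # --- Hedged usage sketch (illustrative; assumes a configured scheduler instance) ---
    # `add_noise` implements the closed-form forward process
    #     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
    # with the flatten/unsqueeze loop broadcasting the per-timestep scalars over the
    # image dimensions, e.g.:
    #     x0 = torch.randn(2, 3, 8, 8)
    #     noise = torch.randn_like(x0)
    #     t = torch.tensor([10, 500])
    #     x_t = scheduler.add_noise(x0, noise, t)  # same shape as x0; `scheduler` is hypothetical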
| 340
| 1
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module ):
'''simple docstring'''
        def __init__( self , tokenizer ):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.model = TFGPTaLMHeadModel.from_config(config )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
        def serving( self , text ):
            tokenized = self.tokenizer(text )
            input_ids_dense = tokenized['input_ids'].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0 , tf.int32 )
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense , attention_mask=input_mask )['logits']
            return outputs
@require_tf
@require_keras_nlp
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
    def test_tokenization( self ):
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs] , return_tensors='tf' )
                tf_outputs = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values , tf.int64 ) == tf_outputs_values ) )
    @slow
    def test_graph_mode( self ):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
    @slow
    def test_saved_model( self ):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = model.serving(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / 'saved.model'
                tf.saved_model.save(model , save_path , signatures={'serving_default': model.serving} )
                loaded_model = tf.saved_model.load(save_path )
                loaded_output = loaded_model.signatures['serving_default'](test_inputs )['output_0']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output ) )
    @slow
    def test_from_config( self ):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = tf_tokenizer(test_inputs )  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config )
            from_config_output = model_from_config(test_inputs )
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
    @slow
    def test_padding( self ):
        for tf_tokenizer in self.tf_tokenizers:
            # set a pad token id so that padding can run
            tf_tokenizer.pad_token_id = 12_31_23
            for max_length in [3, 5, 10_24]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
                out = tf_tokenizer(test_inputs , max_length=max_length )
                out_length = out['input_ids'].numpy().shape[1]
                assert out_length == max_length
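# --- Hedged usage sketch (illustrative; downloads the checkpoint) --- because the
# TF tokenizer runs inside the graph, it can be compiled with tf.function and
# exported in a SavedModel, which is exactly what the tests above exercise:
if __name__ == "__main__" and is_tf_available() and is_keras_nlp_available():
    in_graph_tokenizer = TFGPTaTokenizer.from_pretrained("gpt2" )
    compiled = tf.function(in_graph_tokenizer )
    print(compiled(tf.constant(["Hello world"] ) )["input_ids"] )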
| 178
|
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
lowercase = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments ):
    '''simple docstring'''
    label_smoothing: Optional[float] = field(
        default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
    sortish_sampler: bool = field(default=False , metadata={'help': 'Whether to use SortishSampler or not.'} )
    predict_with_generate: bool = field(
        default=False , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    adafactor: bool = field(default=False , metadata={'help': 'whether to use adafactor'} )
    encoder_layerdrop: Optional[float] = field(
        default=None , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
    decoder_layerdrop: Optional[float] = field(
        default=None , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
    dropout: Optional[float] = field(default=None , metadata={'help': 'Dropout probability. Goes into model.config.'} )
    attention_dropout: Optional[float] = field(
        default=None , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
    lr_scheduler: Optional[str] = field(
        default='linear' , metadata={'help': f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'} , )
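# --- Hedged usage sketch (illustrative) --- these fields surface as CLI flags when
# the dataclass is handed to HfArgumentParser, which is how the legacy seq2seq
# scripts consume them:
if __name__ == "__main__":
    from transformers import HfArgumentParser
    parser = HfArgumentParser(Seq2SeqTrainingArguments )
    (demo_args,) = parser.parse_args_into_dataclasses(
        ["--output_dir", "out", "--label_smoothing", "0.1", "--sortish_sampler"] )
    print(demo_args.label_smoothing , demo_args.sortish_sampler )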
| 178
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxRobertaModelTester(self )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
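# --- Hedged usage sketch (illustrative; downloads weights) --- the slow test above
# loads PyTorch weights into Flax; the same forward pass also works with native
# Flax weights:
if __name__ == "__main__" and is_flax_available():
    demo_model = FlaxRobertaModel.from_pretrained("roberta-base" )
    demo_outputs = demo_model(np.ones((1, 4) , dtype="i4" ) )
    print(demo_outputs.last_hidden_state.shape )  # (1, 4, 768) for roberta-base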
| 354
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("env" )
    else:
        parser = argparse.ArgumentParser("Accelerate env command" )
    parser.add_argument(
        "--config_file" , default=None , help="The config file to use for the default values in the launching script." )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command(args ):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"""{pt_version} ({pt_cuda_available})""",
        "PyTorch XPU available": str(pt_xpu_available ),
        "PyTorch NPU available": str(pt_npu_available ),
        "System RAM": f"""{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB""",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()
    print("\nCopy-and-paste the text below in your GitHub issue\n" )
    print("\n".join([f"""- {prop}: {val}""" for prop, val in info.items()] ) )
    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" )
    accelerate_config_str = (
        "\n".join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config , dict )
        else f"""\t{accelerate_config}"""
    )
    print(accelerate_config_str )
    info["`Accelerate` configs"] = accelerate_config
    return info
def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
if __name__ == "__main__":
raise SystemExit(main())
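# --- Hedged sketch (standalone, illustrative) --- the core facts printed by
# `accelerate env` can also be gathered directly with the same libraries:
def quick_env_summary() -> dict:
    return {
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "System RAM": f"{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB",
    }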
| 230
| 0
|
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    args = parser.parse_args()
    config_parameters_to_change = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
    key_parameters_to_change = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
__snake_case = """""" if has_file(args.repo_path, """config.json""") else """unet"""
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
__snake_case = reader.read()
__snake_case = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
        model = UNetaDModel(**config)
    else:
        class_name = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
        model = class_name(**config)
    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]
        config["""down_block_types"""] = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
        config["""up_block_types"""] = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(""".""")[0] == key:
                    new_state_dict[""".""".join([new_key] + param_key.split(""".""")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value
        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
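    # --- Hedged sketch (standalone, illustrative) --- the heart of the conversion
    # above is a key-rename pass over a plain config dict; a minimal analogue:
    demo_old_config = {"image_size": 64, "num_res_blocks": 2, "act_fn": "silu"}
    demo_rename_map = {"image_size": "sample_size", "num_res_blocks": "layers_per_block"}
    demo_new_config = {demo_rename_map.get(k, k): v for k, v in demo_old_config.items()}
    print(demo_new_config)  # {'sample_size': 64, 'layers_per_block': 2, 'act_fn': 'silu'}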
| 203
|
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
    def test_1b_lyrics_tokenizer( self ):
        '''simple docstring'''
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
        tokens = tokenizer(**self.metas )["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer( self ):
        '''simple docstring'''
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
        tokens = tokenizer(**self.metas )["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
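# --- Hedged usage sketch (illustrative; downloads the tokenizer) --- the tokenizer
# returns one tensor per prior level, which is why the expected outputs above are
# lists of three tensors:
if __name__ == "__main__":
    demo_tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
    demo_tokens = demo_tokenizer(artist="Zac Brown Band" , genres="Country" , lyrics="A short line" )["input_ids"]
    assert len(demo_tokens ) == 3  # one tensor per prior level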
| 203
| 1
|
from collections import defaultdict
def dfs(start: int ) -> int:
    '''simple docstring'''
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    if ret % 2 == 0:
        cuts.append(start )
    return ret
def even_tree() -> None:
    '''simple docstring'''
    dfs(1 )
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
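    # For the edge list above the script prints 2: the subtrees rooted at 3 and at 6
    # have even size, so removing edges (1, 3) and (1, 6) leaves three even-sized
    # components; the root itself always lands in `cuts`, hence the final `- 1`.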
| 350
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fastaa_timesteps,
    smartaa_timesteps,
    smartaaa_timesteps,
    superaa_timesteps,
    superaaa_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput ):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
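# --- Hedged usage sketch (illustrative; the checkpoint is large and gated behind a
# licence acceptance on the Hub) --- once the guarded imports above succeed:
#     pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0")
#     image = pipe("a photo of a red panda").images[0]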
| 323
| 0
|
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
SAMPLE_BPE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
FRAMEWORK = '''pt''' if is_torch_available() else '''tf'''
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>NOTUSED" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "<mask>" )
        self.assertEqual(len(vocab_keys ) , 1004 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
    def test_rust_and_python_bpe_tokenizers( self ):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
        sequence = "I was born in 92000, and this is falsé."
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        # <unk> tokens are not the same for `rust` as for `slow`.
        # This is because spm returns the raw token instead of `unk` in EncodeAsPieces.
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def __a ( self ) -> int:
# fmt: off
a : List[str] = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # CamemBERT is a French model, so we also use French texts.
a : Optional[Any] = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=lowerCAmelCase__ , )
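if __name__ == "__main__":
    # Hedged parity sketch (downloads the tokenizer): the checks above reduce to the
    # slow and fast tokenizers agreeing on the same text.
    demo_slow = CamembertTokenizer.from_pretrained("camembert-base" )
    demo_fast = CamembertTokenizerFast.from_pretrained("camembert-base" )
    demo_text = "J'aime le camembert !"
    assert demo_slow.encode(demo_text ) == demo_fast.encode(demo_text )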
| 105
|
"""simple docstring"""
encode_dict = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str ) ->str:
    '''simple docstring'''
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces" )
    return encoded
def decode(coded: str ) ->str:
    '''simple docstring'''
    if set(coded ) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces" )
    decoded = ""
    for word in coded.split():
        while len(word ) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
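# A quick round-trip check of the two functions above (runs on import):
assert decode(encode("hello world" ) ) == "hello world"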
| 105
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCamelCase ( metaclass=_lowercase ):
UpperCAmelCase_ = ["speech"]
def __init__(self , *__a , **__a ) -> int:
requires_backends(self , ["speech"] )
class _lowerCamelCase ( metaclass=_lowercase ):
UpperCAmelCase_ = ["speech"]
def __init__(self , *__a , **__a ) -> List[Any]:
requires_backends(self , ["speech"] )
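# --- Hedged sketch (illustrative) --- the dummy pattern fails loudly only at use
# time: when the "speech" extra is missing, instantiation raises an ImportError
# naming the dependency.
def show_speech_dummy_error() -> None:
    try:
        _lowerCamelCase()
    except ImportError as err:
        print(err )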
| 244
|
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class _lowerCamelCase :
    def __init__(self ) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def snake_case_ (self ) -> list[float]:
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fill the smaller signal with zeros so both signals have the same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # each row of the matrix is the second signal rotated by i positions
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
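# Sanity check: the circular convolution of [2, 1, 2, -1] with [1, 2, 3, 4]
# computed by the class above is [10.0, 10.0, 6.0, 14.0].
assert _lowerCamelCase().snake_case_() == [10.0, 10.0, 6.0, 14.0]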
| 244
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
    def __init__( self , image_processor , tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer)
    def __call__( self , images=None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , max_patches: Optional[int] = 2048 , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("""You have to specify either images or text.""")
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images , return_tensors=return_tensors , max_patches=max_patches , **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images , return_tensors=return_tensors , max_patches=max_patches , header_text=text , **kwargs)
        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("""attention_mask""")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("""input_ids""")
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode( self , *args , **kwargs):
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs):
        return self.tokenizer.decode(*args , **kwargs)
    @property
    def model_input_names( self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
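if __name__ == "__main__":
    # Hedged usage sketch (downloads the processor): images are routed to the image
    # processor and text to the tokenizer, exactly as in `__call__` above.
    from PIL import Image
    demo_processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base" )
    demo_inputs = demo_processor(images=Image.new("RGB" , (64, 64) ) , text="A caption" , return_tensors="pt" )
    print(sorted(demo_inputs.keys() ) )  # flattened patches plus decoder-side ids/mask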
| 163
|
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase ):
    def __init__( self , parent):
        self.parent = parent
    def prepare_feat_extract_dict( self):
        return {}
def get_html_strings():
UpperCAmelCase__ : List[str] = """<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR=\"FFFFFF\">
<HR>
<a href=\"http://google.com\">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style=\"color:#0000FF\">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>"""
UpperCAmelCase__ : Tuple = """
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
"""
return [html_string_a, html_string_a]
@require_bsa
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin , unittest.TestCase ):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bsa_available() else None
    def setUp( self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)
    @property
    def feat_extract_dict( self):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call( self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)
        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on
        self.assertEqual(encoding.nodes , expected_nodes)
        self.assertEqual(encoding.xpaths , expected_xpaths)
        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)
        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        self.assertEqual(len(encoding.nodes) , 2)
        self.assertEqual(len(encoding.xpaths) , 2)
        self.assertEqual(encoding.nodes , expected_nodes)
        self.assertEqual(encoding.xpaths , expected_xpaths)
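if __name__ == "__main__" and is_bsa_available():
    # Hedged usage sketch: the extractor walks the DOM with BeautifulSoup and
    # returns visible text nodes paired with their XPaths.
    demo_fe = MarkupLMFeatureExtractor()
    demo_enc = demo_fe("<html><body><h1>Hello</h1></body></html>" )
    print(demo_enc.nodes , demo_enc.xpaths )  # [['Hello']] [['/html/body/h1']]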
| 163
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
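if __name__ == "__main__":
    # Hedged usage sketch (downloads the vocab): the special-token methods above
    # produce the familiar [CLS] A [SEP] B [SEP] layout with 0/1 segment ids.
    demo_tok = BertTokenizerFast.from_pretrained("bert-base-uncased" )
    demo_enc = demo_tok("hello" , "world" )
    print(demo_enc["input_ids"] )       # [101, 7592, 102, 2088, 102]
    print(demo_enc["token_type_ids"] )  # [0, 0, 0, 1, 1]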
| 363
|
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any] ) -> None:
    create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree(sequence: list[Any] , current_subsequence: list[Any] , index: int ) -> None:
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
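# --- Hedged sketch (standalone) --- the same state-space tree, collecting the
# subsequences instead of printing them, which makes the 2**n count explicit:
def all_subsequences(sequence: list[Any] ) -> list[list[Any]]:
    results: list[list[Any]] = []
    def walk(current: list[Any] , index: int ) -> None:
        if index == len(sequence ):
            results.append(list(current ) )
            return
        walk(current , index + 1 )   # branch that excludes sequence[index]
        current.append(sequence[index] )
        walk(current , index + 1 )   # branch that includes sequence[index]
        current.pop()
    walk([] , 0 )
    return results
assert len(all_subsequences([3, 1, 2, 4] ) ) == 2 ** 4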
| 117
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """gptj"""
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=5_04_00 , n_positions=20_48 , n_embd=40_96 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_02_56 , eos_token_id=5_02_56 , tie_word_embeddings=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class GPTJOnnxConfig(OnnxConfigWithPast ):
    '''simple docstring'''
    def __init__( self , config: PretrainedConfig , task: str = "default" , patching_specs: List[PatchingSpec] = None , use_past: bool = False , ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , "pad_token_id" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_layers( self ) -> int:
        return self._config.n_layer
    @property
    def num_attention_heads( self ) -> int:
        return self._config.n_head
    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ) -> int:
        return 13
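# --- Hedged sanity sketch using the config class defined above ---
if __name__ == "__main__":
    demo_config = GPTJConfig()
    assert demo_config.hidden_size == demo_config.n_embd          # aliased via attribute_map
    assert demo_config.num_hidden_layers == demo_config.n_layer   # same mechanism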
| 241
|
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data ):
    '''simple docstring'''
    return (data["data"], data["target"])
def xgboost(features ,target ,test_features ):
    '''simple docstring'''
    xgb = XGBRegressor(verbosity=0 ,random_state=42 )
    xgb.fit(features ,target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) ,1 )
    return predictions
def main():
    '''simple docstring'''
    california = fetch_california_housing()
    data, target = data_handling(california )
    x_train, x_test, y_train, y_test = train_test_split(
        data ,target ,test_size=0.25 ,random_state=1 )
    predictions = xgboost(x_train ,y_train ,x_test )
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test ,predictions )}" )
    print(f"Mean Square Error : {mean_squared_error(y_test ,predictions )}" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
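# --- Hedged sketch (standalone, illustrative) --- the same fit/predict flow on
# synthetic data, with no dataset download:
def synthetic_demo() -> None:
    rng = np.random.default_rng(0 )
    features = rng.normal(size=(100, 3) )
    targets = features @ np.array([1.0, -2.0, 0.5] ) + rng.normal(scale=0.1 ,size=100 )
    model = XGBRegressor(verbosity=0 ,random_state=42 ).fit(features ,targets )
    print(model.predict(features[:2] ) )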
| 349
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
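# A standalone launch sketch for the same estimator outside the test harness.
# The role ARN, framework versions, and script path below are illustrative
# assumptions; they depend entirely on your AWS account and repository layout.
if __name__ == "__main__":
    from sagemaker.huggingface import HuggingFace

    estimator = HuggingFace(
        entry_point="run_glue.py",
        source_dir="./examples/pytorch/text-classification",
        role="arn:aws:iam::111122223333:role/sagemaker-execution-role",  # hypothetical
        instance_count=2,
        instance_type="ml.p3.16xlarge",
        transformers_version="4.6",  # assumption
        pytorch_version="1.7",  # assumption
        py_version="py36",
        distribution={"smdistributed": {"dataparallel": {"enabled": True}}},
        hyperparameters={"model_name_or_path": "distilbert-base-cased", "output_dir": "/opt/ml/model"},
    )
    estimator.fit()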
from collections import deque


def tarjan(g):
    """Return the strongly connected components of graph `g` (adjacency list)."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    """Build an adjacency list with `n` vertices from a list of (u, v) edges."""
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
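# Illustrative follow-up check: in a directed acyclic graph every vertex is its
# own strongly connected component, so a 3-vertex chain 0 -> 1 -> 2 yields three
# singletons (listed in order of completion).
if __name__ == "__main__":
    chain = create_graph(3, [(0, 1), (1, 2)])
    print(tarjan(chain))  # [[2], [1], [0]]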
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
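# A minimal usage sketch for the exported pipelines (the checkpoint name and the
# CUDA requirement are assumptions, not pinned by this module). Guarded so that
# nothing runs at import time:
if __name__ == "__main__":
    pipe = TextToVideoSDPipeline.from_pretrained(
        "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
    ).to("cuda")
    result = pipe("a panda playing guitar", num_inference_steps=25)
    frames = result.frames  # the `frames` field of the output dataclass above
    print(len(frames))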
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
            ],
        )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
            ],
        )

    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
            ],
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
            ],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
@slow
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = {"""input_ids""": [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: return the maximum sum of a contiguous subarray."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
import unittest
from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(TypeError):
            generator(4)
    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
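# A hedged end-to-end sketch with a larger real checkpoint (the model choice is
# an assumption; any seq2seq LM checkpoint works with this pipeline):
if __name__ == "__main__":
    translator = pipeline("text2text-generation", model="t5-small")
    print(translator("translate English to German: Good morning.", max_length=20))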
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
def lowerCamelCase__( self :str ,__snake_case :List[Any] ,__snake_case :Union[str, Any] ,__snake_case :Optional[Any] ,__snake_case :Tuple ,__snake_case :Optional[Any] ,__snake_case :Dict ,__snake_case :List[str] ) -> List[Any]:
a__ = OpenLlamaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE )
a__ = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__( self :List[Any] ,__snake_case :Dict ,__snake_case :Optional[Any] ,__snake_case :Any ,__snake_case :int ,__snake_case :List[str] ,__snake_case :List[Any] ,__snake_case :Optional[int] ,__snake_case :str ,__snake_case :Any ,) -> List[Any]:
a__ = True
a__ = OpenLlamaModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(
__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,encoder_hidden_states=__SCREAMING_SNAKE_CASE ,encoder_attention_mask=__SCREAMING_SNAKE_CASE ,)
a__ = model(
__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,encoder_hidden_states=__SCREAMING_SNAKE_CASE ,)
a__ = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__( self :int ,__snake_case :int ,__snake_case :Dict ,__snake_case :List[str] ,__snake_case :Tuple ,__snake_case :Optional[int] ,__snake_case :Dict ,__snake_case :List[Any] ,__snake_case :int ,__snake_case :str ,) -> Union[str, Any]:
a__ = OpenLlamaForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :int ,__snake_case :Any ,__snake_case :Union[str, Any] ,__snake_case :Dict ,__snake_case :Optional[int] ,__snake_case :Optional[Any] ,__snake_case :Optional[Any] ,__snake_case :Tuple ,__snake_case :int ,__snake_case :int ,) -> Optional[int]:
a__ = True
a__ = True
a__ = OpenLlamaForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
# first forward pass
a__ = model(
__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,encoder_hidden_states=__SCREAMING_SNAKE_CASE ,encoder_attention_mask=__SCREAMING_SNAKE_CASE ,use_cache=__SCREAMING_SNAKE_CASE ,)
a__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a__ = ids_tensor((self.batch_size, 3) ,config.vocab_size )
a__ = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
a__ = torch.cat([input_ids, next_tokens] ,dim=-1 )
a__ = torch.cat([input_mask, next_mask] ,dim=-1 )
a__ = model(
__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,encoder_hidden_states=__SCREAMING_SNAKE_CASE ,encoder_attention_mask=__SCREAMING_SNAKE_CASE ,output_hidden_states=__SCREAMING_SNAKE_CASE ,)['hidden_states'][0]
a__ = model(
__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,encoder_hidden_states=__SCREAMING_SNAKE_CASE ,encoder_attention_mask=__SCREAMING_SNAKE_CASE ,past_key_values=__SCREAMING_SNAKE_CASE ,output_hidden_states=__SCREAMING_SNAKE_CASE ,)['hidden_states'][0]
# select random slice
a__ = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
a__ = output_from_no_past[:, -3:, random_slice_idx].detach()
a__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
def lowerCamelCase__( self :Optional[int] ) -> int:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :List[Any] ) -> List[Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__( self :List[str] ) -> Dict:
a__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a__ = type
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__( self :Dict ) -> str:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = 3
a__ = input_dict['input_ids']
a__ = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
a__ = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
a__ = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__( self :Any ) -> str:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = 3
a__ = 'single_label_classification'
a__ = input_dict['input_ids']
a__ = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
a__ = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
a__ = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__( self :Optional[Any] ) -> List[Any]:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = 3
a__ = 'multi_label_classification'
a__ = input_dict['input_ids']
a__ = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
a__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
a__ = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def lowerCamelCase__( self :int ) -> int:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def lowerCamelCase__( self :int ,__snake_case :List[Any] ) -> Dict:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = ids_tensor([1, 10] ,config.vocab_size )
a__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a__ = OpenLlamaModel(__SCREAMING_SNAKE_CASE )
original_model.to(__SCREAMING_SNAKE_CASE )
original_model.eval()
a__ = original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
a__ = original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a__ = {'type': scaling_type, 'factor': 10.0}
a__ = OpenLlamaModel(__SCREAMING_SNAKE_CASE )
scaled_model.to(__SCREAMING_SNAKE_CASE )
scaled_model.eval()
a__ = scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
a__ = scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,atol=1E-5 ) )
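# A minimal numeric sketch of what "linear" RoPE scaling does. This mirrors the
# idea the parameterized test above exercises; it is *not* the model's own code,
# and `dim`/`base` are illustrative defaults.
if __name__ == "__main__":
    def rotary_angles(positions, dim=8, base=10000.0, factor=1.0):
        # Linear scaling divides the position index by `factor`, stretching the
        # usable context window at the cost of positional resolution.
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
        return torch.outer(positions / factor, inv_freq)

    pos = torch.arange(16).float()
    plain = rotary_angles(pos)
    scaled = rotary_angles(pos, factor=10.0)
    assert torch.allclose(scaled * 10.0, plain)  # same angles, stretched 10x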
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """Wraps an OwlViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
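# A usage sketch for the processor above (the checkpoint name and image URL are
# assumptions; network access is required):
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import OwlViTProcessor

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    # Nested list: one list of free-text queries per image in the batch.
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']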
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        # earlier versions of set_timesteps() misindexed alphas when the number of
        # inference steps was a power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample
    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
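# A compact sketch of the denoising loop the tests above drive, using a random
# tensor in place of a trained UNet (the sample shape is an assumption):
if __name__ == "__main__":
    scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
    scheduler.set_timesteps(10)

    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        residual = torch.randn_like(sample)  # stand-in for model(sample, t)
        # `step` dispatches internally to the Runge-Kutta (PRK) warm-up steps and
        # then to the linear multistep (PLMS) steps exercised individually above.
        sample = scheduler.step(residual, t, sample).prev_sample
    print(sample.shape)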
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Import and return the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file that have a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` attributes in a model test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated to `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to model tester classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to test classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to model tester classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the class information succinct and easy to read in JSON form."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
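# Example usage (assumes it is run from the root of the transformers repository,
# which the `tests/models/...` layout above presupposes):
if __name__ == "__main__":
    test_file = "tests/models/bert/test_modeling_bert.py"
    print(to_json(get_model_to_tester_mapping(test_file)))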
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized version of a model, using a pretrained config.

    Extra keyword arguments are passed to ``AutoConfig.from_pretrained`` and can
    override config values.
    """
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
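# Programmatic equivalent of the CLI entry point above (the config overrides are
# assumptions that apply to T5-style configs, and the script filename is a guess):
#
#   model = save_randomly_initialized_version("t5-small", "./tiny-t5", d_model=64, num_layers=2)
#   print(model.num_parameters())
#
# From a shell, `fire` maps positional and flag arguments onto the same function:
#
#   python save_randomly_initialized_model.py t5-small ./tiny-t5 --d_model=64 --num_layers=2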
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
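# Added usage sketch (illustrative only; the image path and the question are hypothetical,
# the checkpoint is the class default above):
#   >>> from PIL import Image
#   >>> tool = DocumentQuestionAnsweringTool()
#   >>> tool(document=Image.open("invoice.png"), question="What is the total amount?")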
| 260
| 1
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
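# Added usage sketch (illustrative; "openai/clip-vit-base-patch32" is the standard public
# CLIP checkpoint, `image` is any PIL.Image):
#   >>> processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   >>> batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   >>> sorted(batch.keys())
#   ['attention_mask', 'input_ids', 'pixel_values']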
| 26
|
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
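# Added sanity check (illustration, not part of the original script): a uniform two-way
# distribution has entropy ln(2) ~= 0.6931.
#   >>> entropy(torch.tensor([[0.5, 0.5]]))
#   tensor([0.6931])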
def print_2d_tensor(tensor):
    """Print a 2D tensor."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute the attention entropy per head and the importance score (loss gradient) per head."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
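# Added shape note (illustration): `head_mask` is (n_layers, n_heads); entry (l, h) multiplies
# the attention probabilities of head h in layer l, so zeroing one entry silences that head:
#   >>> mask = torch.ones(n_layers, n_heads)
#   >>> mask[0, 1] = 0.0  # silence head 1 of layer 0
#   >>> compute_heads_importance(args, model, eval_dataloader, head_mask=mask)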
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some heads to zero) until the score drops below the masking threshold,
    following the head importance scores of Michel et al. (http://arxiv.org/abs/1905.10650)."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove the masked weights) and measure the effect on score and speed."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task."
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models"
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written."
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path"
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path"
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3"
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1"
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)."
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int, help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
| 106
| 0
|
"""Neville's method for polynomial interpolation."""
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate the polynomial through (x_points, y_points) at x0
    using Neville's method. Returns the approximated value and the full table
    of intermediate values.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
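# Added example (the five points lie on the line y = x + 5, so the result is exact):
#   >>> neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]
#   10.0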
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363
|
"""Unit tests for the `lib` linear algebra module."""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class LinearAlgebraTestCase(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )
if __name__ == "__main__":
unittest.main()
| 283
| 0
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
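# Added usage sketch (illustrative): other modules can call this to fail fast, e.g.
#   >>> dep_version_check("tqdm")  # raises if the installed tqdm violates the pinned range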
| 196
|
"""Convert Swin Transformer + UperNet checkpoints from mmsegmentation to Hugging Face Transformers."""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
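# Added sanity sketch (illustrative; note this call downloads the small ADE20K label file
# from the Hugging Face Hub):
#   >>> config = get_upernet_config("upernet-swin-tiny")
#   >>> config.backbone_config.depths
#   (2, 2, 6, 2)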
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
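# Added round-trip check (illustration): the "reverse" helpers undo the "correct" ones, which
# is why the conversion below applies the reverse variants to the mmsegmentation weights.
#   >>> w = torch.arange(16.0).reshape(2, 8)
#   >>> torch.equal(correct_unfold_reduction_order(reverse_correct_unfold_reduction_order(w)), w)
#   True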
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 323
| 0
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the filesystem protocol (e.g. "s3://") from `dataset_path` if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear the reference to fsspec's event loop and thread, otherwise HTTPFileSystem can hang."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
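# Added usage notes for the two helpers above (illustrative, no network needed):
#   >>> extract_path_from_uri("s3://my-bucket/my-dataset")
#   'my-bucket/my-dataset'
#   >>> is_remote_filesystem(fsspec.filesystem("file"))
#   False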
| 357
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
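# Added illustration of what `_re_copy_warning` matches (a hypothetical example comment):
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
# group 1 is the indent, group 2 the dotted object name, group 3 the optional `with old->new` pattern.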
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the file content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite=False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 325
| 0
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 244
|
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
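
# A minimal standalone sketch of the sampling loop exercised above, assuming
# only the public diffusers API used in these tests; the zero tensor stands in
# for a trained denoiser's output and carries no meaning beyond illustration.
if __name__ == "__main__":
    sketch_scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
    sketch_scheduler.set_timesteps(timesteps=[106, 0])  # explicit, descending custom schedule
    sample = torch.randn(1, 3, 8, 8) * sketch_scheduler.init_noise_sigma
    for t in sketch_scheduler.timesteps:
        scaled = sketch_scheduler.scale_model_input(sample, t)
        residual = torch.zeros_like(scaled)  # stand-in for model(scaled, t)
        sample = sketch_scheduler.step(residual, t, sample).prev_sample
    print(sample.shape)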
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel

if is_vision_available():
    from transformers import Mask2FormerImageProcessor

if is_vision_available():
    from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels

        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

        result = model(
            pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
        )

        comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
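
# A rough sketch of how the universal-segmentation outputs verified above are
# turned into a per-pixel prediction. `post_process_semantic_segmentation` is
# part of the image processor's public API, but treat the exact recipe below
# as illustrative rather than a tested pipeline.
if __name__ == "__main__":
    processor = Mask2FormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
    seg_model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance")
    image = prepare_img()
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = seg_model(**inputs)
    # one (height, width) tensor of class ids per input image
    segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
    print(segmentation.shape)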
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for the NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    def __init__(self):
        # in CoNLL-2003 the chunk column is the second-to-last column
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
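
# A quick sanity-check sketch for the NER reader above: write a two-token
# CoNLL-style split into a temporary directory and parse it back. The file
# layout ("token label" per line, blank line between sentences) is the format
# `read_examples_from_file` expects; directory and split names are arbitrary.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as data_dir:
        with open(os.path.join(data_dir, "train.txt"), "w", encoding="utf-8") as f:
            f.write("Alice B-PER\nworks O\n\n")
        examples = NER().read_examples_from_file(data_dir, "train")
        print(examples[0].words, examples[0].labels)  # ['Alice', 'works'] ['B-PER', 'O']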
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
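
# The per-image rule in `forward` reduces to: flag an image when any cosine
# similarity against a concept embedding exceeds that concept's threshold, with
# a small adjustment once a "special care" concept fires. A self-contained
# sketch of that rule with made-up embeddings and thresholds:
if __name__ == "__main__":
    torch.manual_seed(0)
    image_embeds = torch.randn(2, 8)  # 2 images with illustrative 8-dim embeddings
    concept_embeds = torch.randn(17, 8)  # stand-ins for the learned concept embeddings
    thresholds = torch.full((17,), 0.9)  # stand-ins for concept_embeds_weights
    scores = cosine_distance(image_embeds, concept_embeds) - thresholds
    print(torch.any(scores > 0, dim=1))  # one boolean verdict per image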
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]

    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
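
# A rough end-to-end sketch of how this processor is meant to be used for scene
# text recognition, following the pattern in the Transformers docs. The
# checkpoint name, sample image URL, and `MgpstrForSceneTextRecognition` class
# are assumptions here; treat the snippet as illustrative, not a tested recipe.
if __name__ == "__main__":
    import requests
    from PIL import Image

    from transformers import MgpstrForSceneTextRecognition

    processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
    model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")

    url = "https://i.postimg.cc/ZKwLg2Gw/367-14.png"  # assumed sample image of rendered text
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    print(processor.batch_decode(outputs.logits)["generated_text"])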
# Precomputed denoising timestep schedules of assorted lengths; the list names
# below are descriptive placeholders keyed to each schedule's length.
TIMESTEPS_27_A = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
TIMESTEPS_27_B = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
TIMESTEPS_50 = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
TIMESTEPS_100_A = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
TIMESTEPS_185 = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
TIMESTEPS_27_C = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
TIMESTEPS_40 = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
TIMESTEPS_100_B = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
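
# Tables like the ones above are typically consumed by a scheduler that accepts
# an explicit timestep list. A minimal sketch, assuming the
# `set_timesteps(timesteps=...)` API that e.g. diffusers' consistency-model
# scheduler exposes; the 1000-step training horizon is an assumption.
if __name__ == "__main__":
    from diffusers import CMStochasticIterativeScheduler

    scheduler = CMStochasticIterativeScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(timesteps=TIMESTEPS_27_A)
    print(scheduler.timesteps)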
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase = {'''configuration_ibert''': ['''IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''IBertConfig''', '''IBertOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            # the fused timm qkv matrix is split into separate query/key/value tensors
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"

            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs=None,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        spaces_between_special_tokens=True,
        **kwargs,
    ):
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
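
# A short usage sketch for the tokenizer above. The checkpoint name comes from
# the pretrained map at the top of the file; the printed ids depend on the
# SentencePiece model that gets downloaded, so they are indicative only.
if __name__ == "__main__":
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    encoded = tokenizer("Sparse attention scales to long documents.")
    print(encoded["input_ids"])
    print(tokenizer.decode(encoded["input_ids"], skip_special_tokens=True))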
"""simple docstring"""
from functools import lru_cache
@lru_cache
def __lowercase ( _a ):
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
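
# `@lru_cache` memoizes every intermediate result, so each factorial is computed
# once per process; the checks below are ordinary arithmetic facts.
if __name__ == "__main__":
    assert factorial(5) == 120
    assert factorial(10) == 3_628_800  # reuses the cached factorial(5) internally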
"""simple docstring"""
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
SCREAMING_SNAKE_CASE__:Tuple = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def _lowerCamelCase( a=None ):
if subparsers is not None:
__a = subparsers.add_parser("tpu-config" , description=_description )
else:
__a = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description )
# Core arguments
__a = parser.add_argument_group(
"Config Arguments" , "Arguments that can be configured through `accelerate config`." )
config_args.add_argument(
"--config_file" , type=_UpperCAmelCase , default=_UpperCAmelCase , help="Path to the config file to use for accelerate." , )
config_args.add_argument(
"--tpu_name" , default=_UpperCAmelCase , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
config_args.add_argument(
"--tpu_zone" , default=_UpperCAmelCase , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
__a = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU." )
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
pod_args.add_argument(
"--command_file" , default=_UpperCAmelCase , help="The path to the file containing the commands to run on the pod on startup." , )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
if subparsers is not None:
parser.set_defaults(func=_UpperCAmelCase )
return parser
def _lowerCamelCase( a ):
__a = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(_UpperCAmelCase ):
__a = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
__a = defaults.command_file
if not args.command and defaults.commands is not None:
__a = defaults.commands
if not args.tpu_name:
__a = defaults.tpu_name
if not args.tpu_zone:
__a = defaults.tpu_zone
if args.accelerate_version == "dev":
__a = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
__a = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , _UpperCAmelCase ):
__a = F"accelerate=={args.accelerate_version}"
if not args.command_file and not args.command:
raise ValueError("You must specify either a command file or a command to run on the pod." )
if args.command_file:
with open(args.command_file , "r" ) as f:
__a = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , _UpperCAmelCase ):
__a = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
__a = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F"pip install {args.accelerate_version}"]
new_cmd += args.command
__a = '; '.join(_UpperCAmelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
__a = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"Running {' '.join(_UpperCAmelCase )}" )
return
subprocess.run(_UpperCAmelCase )
print("Successfully setup pod." )
def _lowerCamelCase( ):
__a = tpu_command_parser()
__a = parser.parse_args()
tpu_command_launcher(_UpperCAmelCase )
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__:Tuple = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
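# Worked check of the resize logic above (illustrative, not part of the
# original file): for a requested shortest_edge of 224 with the default
# crop_pct of 224 / 256, the image is first resized so its shortest edge is
# int(224 / (224 / 256)) == 256 and then center-cropped to (224, 224);
# at 384 and above the image is warped straight to (384, 384) instead.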
| 268
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_graphormer'] = [
        'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GraphormerForGraphClassification',
        'GraphormerModel',
        'GraphormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
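# Note (illustrative, not part of the original file): with this lazy-module
# pattern the bare package import stays cheap; the torch-backed
# `modeling_graphormer` module is only imported when one of its attributes
# (e.g. GraphormerModel) is first accessed through the _LazyModule proxy.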
| 138
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ChineseCLIPImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs) -> None:
        feature_extractor = None
        if 'feature_extractor' in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class
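# Minimal usage sketch (illustrative; the checkpoint name is an assumption,
# not taken from the original file):
#
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")
#
# When both modalities are given, the returned BatchEncoding carries the
# tokenizer fields plus a `pixel_values` entry from the image processor.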
| 6
| 0
|
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
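# Usage sketch (illustrative; the KEYMAP key name is an assumption):
#
#   @register
#   class Menu:
#       @mark(KEYMAP["up"])
#       def move_up(self):
#           ...
#
# register() rebuilds the class through the KeyHandler metaclass so that
# handle_input() can dispatch a pressed key to the decorated method.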
| 293
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072,
        squeeze_factor=2, max_position_embeddings=512, position_buckets=256, share_att_key=True,
        relative_attention=True, pos_att_type=("p2c", "c2p"), norm_rel_ebd="layer_norm", hidden_act="gelu_python",
        hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1,
        initializer_range=0.02, layer_norm_eps=1E-7, feature_layer_norm_eps=1E-5, feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False,
        use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
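# Worked check of the property above (illustrative): with the default
# conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the ratio is
# 5 * 2**6 == 320, i.e. the feature encoder emits one frame per 320 samples.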
| 293
| 1
|
"""simple docstring"""
def is_isogram(string: str) -> bool:
    """An isogram is a word in which no letter is repeated; the check is case-insensitive."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
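# Quick examples (illustrative): is_isogram("Uncopyrightable") is True, while
# is_isogram("allowance") is False because of the repeated "l"; the lower()
# call makes the check case-insensitive.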
| 115
|
"""simple docstring"""
def solution(n: int = 4_000_000) -> int:
    """Returns the sum of all even Fibonacci numbers that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F"{solution() = }")
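# Sanity check (illustrative): for n = 100 the even Fibonacci numbers are
# 2, 8 and 34, so solution(100) == 44.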
| 115
| 1
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
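# Note (illustrative): despite its name, cosine_distance returns a cosine
# *similarity* matrix of shape (num_image_embeds, num_concept_embeds);
# larger values mean the image embedding is closer to a concept embedding.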
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
| 69
|
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
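# Worked example (illustrative): for the triangle graph with edges
# 0-1 (weight 1), 1-2 (weight 3) and 0-2 (weight 2), prisms_algorithm
# returns [(0, 1), (0, 2)], a minimum spanning tree of total weight 3.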
| 69
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
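# Usage sketch (illustrative; the feature layout is an assumption):
#
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   task = TextClassification(text_column="text", label_column="labels")
#   task = task.align_with_features(features)
#
# After alignment, task.label_schema carries the dataset's actual ClassLabel.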
| 51
|
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient v* A v / (v* v) of a Hermitian matrix."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
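# Note (illustrative): for a Hermitian matrix the Rayleigh quotient
# v* A v / (v* v) is always real and bounded by the smallest and largest
# eigenvalues of A; the final assertion checks it equals 3 for that (a, v).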
| 140
| 0
|
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotate the image by mapping three source points onto three destination points."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
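# Note (illustrative): cv2.getAffineTransform solves for the 2x3 matrix M
# that maps the three source points onto the three destination points, and
# cv2.warpAffine then applies [x', y']^T = M @ [x, y, 1]^T to every pixel.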
| 301
|
def solution(n: int = 10) -> str:
    """Returns the last n digits of NUMBER = 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * pow(2, 7_830_457, modulus) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
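# Note (illustrative): the three-argument pow(2, 7830457, modulus) performs
# modular exponentiation, so only the last n digits are ever materialised
# instead of the full number, which has roughly 2.4 million decimal digits.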
| 301
| 1
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices i < j with nums[i] + nums[j] == target in a sorted list."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{two_pointer([2, 7, 1_1, 1_5], 9) = }")
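# Quick check (illustrative): two_pointer assumes nums is sorted in ascending
# order; on [2, 7, 11, 15] with target 9 it returns [0, 1] since 2 + 7 == 9.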
| 6
|
def abbr(a: str, b: str) -> bool:
    """Return True if string `a` can be abbreviated to string `b` by
    capitalising some lowercase letters and deleting the remaining ones."""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
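# Examples (illustrative): abbr("daBcd", "ABC") is True (capitalise "a" to
# "A", match "B", capitalise "c" to "C", drop the lowercase "d"s), while
# abbr("dBcd", "ABC") is False because no character can produce the "A".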
| 259
| 0
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'upsamples.{i}.1.weight_g']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'upsamples.{i}.1.weight_v']
        hf_model.upsampler[i].bias.data = checkpoint[f'upsamples.{i}.1.bias']

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'blocks.{i}.convs1.{j}.1.bias']

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'blocks.{i}.convs2.{j}.1.bias']

    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['model']['generator'], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
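# Example invocation (illustrative; the script name and all paths are
# placeholders, not taken from the original file):
#
#   python convert_hifigan.py --checkpoint_path generator.ckpt \
#       --stats_path stats.npy --pytorch_dump_folder_path ./speecht5_hifigan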
| 369
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __snake_case :
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: List[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
a__: Any = None
if self.use_labels:
a__: Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__: Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> str:
'''simple docstring'''
a__: Any = VideoMAEModel(config=lowercase)
model.to(lowercase)
model.eval()
a__: Optional[Any] = model(lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> int:
'''simple docstring'''
a__: List[str] = VideoMAEForPreTraining(lowercase)
model.to(lowercase)
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
a__: int = torch.ones((self.num_masks,))
a__: Any = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
a__: int = mask.expand(self.batch_size , -1).bool()
a__: Union[str, Any] = model(lowercase , lowercase)
# model only returns predictions for masked patches
a__: List[str] = mask.sum().item()
a__: str = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels))
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Dict = self.prepare_config_and_inputs()
a__ , a__ , a__: Dict = config_and_inputs
a__: Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
a__ = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
a__ = (
{"""feature-extraction""": VideoMAEModel, """video-classification""": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: List[str] = VideoMAEModelTester(self)
a__: str = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37)
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase=False) -> Any:
'''simple docstring'''
a__: Optional[int] = copy.deepcopy(lowercase)
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
a__: List[Any] = torch.ones((self.model_tester.num_masks,))
a__: List[Any] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
a__: Optional[int] = mask.expand(self.model_tester.batch_size , -1).bool()
a__: Union[str, Any] = bool_masked_pos.to(lowercase)
if return_labels:
if model_class in [
*get_values(lowercase),
]:
a__: str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase)
return inputs_dict
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds')
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__ , a__: Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__: Union[str, Any] = model_class(lowercase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a__: str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear))
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__ , a__: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__: Any = model_class(lowercase)
a__: int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__: Optional[Any] = [*signature.parameters.keys()]
a__: Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase)
@slow
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: int = VideoMAEModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
if not self.has_attentions:
pass
else:
a__ , a__: Any = self.model_tester.prepare_config_and_inputs_for_common()
a__: str = True
for model_class in self.all_model_classes:
a__: Optional[int] = self.model_tester.seq_length - self.model_tester.num_masks
a__: List[str] = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
a__: Tuple = True
a__: str = False
a__: Dict = True
a__: List[Any] = model_class(lowercase)
model.to(lowercase)
model.eval()
with torch.no_grad():
a__: int = model(**self._prepare_for_class(lowercase , lowercase))
a__: Any = outputs.attentions
self.assertEqual(len(lowercase) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a__: Tuple = True
a__: List[Any] = model_class(lowercase)
model.to(lowercase)
model.eval()
with torch.no_grad():
a__: str = model(**self._prepare_for_class(lowercase , lowercase))
a__: int = outputs.attentions
self.assertEqual(len(lowercase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
a__: Optional[Any] = len(lowercase)
# Check attention is always last and order is fine
a__: str = True
a__: Dict = True
a__: Tuple = model_class(lowercase)
model.to(lowercase)
model.eval()
with torch.no_grad():
a__: Optional[Any] = model(**self._prepare_for_class(lowercase , lowercase))
self.assertEqual(out_len + 1 , len(lowercase))
a__: int = outputs.attentions
self.assertEqual(len(lowercase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
def check_hidden_states_output(lowercase , lowercase , lowercase):
a__: Union[str, Any] = model_class(lowercase)
model.to(lowercase)
model.eval()
with torch.no_grad():
a__: Tuple = model(**self._prepare_for_class(lowercase , lowercase))
a__: Dict = outputs.hidden_states
a__: Union[str, Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase) , lowercase)
a__: Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
a__: Optional[Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
a__ , a__: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__: Dict = True
check_hidden_states_output(lowercase , lowercase , lowercase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__: List[Any] = True
check_hidden_states_output(lowercase , lowercase , lowercase)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
pass
def __a ( ) ->List[Any]:
a__: List[str] = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
a__: Dict = np.load(_SCREAMING_SNAKE_CASE )
return list(_SCREAMING_SNAKE_CASE )
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5])
if is_vision_available()
else None
)
@slow
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics').to(
lowercase)
a__: Dict = self.default_image_processor
a__: str = prepare_video()
a__: Tuple = image_processor(lowercase , return_tensors='pt').to(lowercase)
# forward pass
with torch.no_grad():
a__: List[Any] = model(**lowercase)
# verify the logits
a__: str = torch.Size((1, 4_00))
self.assertEqual(outputs.logits.shape , lowercase)
a__: Optional[Any] = torch.tensor([0.3669, -0.0688, -0.2421]).to(lowercase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4))
@slow
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short').to(lowercase)
a__: Optional[Any] = self.default_image_processor
a__: List[Any] = prepare_video()
a__: Union[str, Any] = image_processor(lowercase , return_tensors='pt').to(lowercase)
# add boolean mask, indicating which patches to mask
a__: Optional[Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt')
a__: Any = torch.load(lowercase)
# forward pass
with torch.no_grad():
a__: Any = model(**lowercase)
# verify the logits
a__: Union[str, Any] = torch.Size([1, 14_08, 15_36])
a__: Union[str, Any] = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=lowercase)
self.assertEqual(outputs.logits.shape , lowercase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowercase , atol=1e-4))
# verify the loss (`config.norm_pix_loss` = `True`)
a__: Optional[int] = torch.tensor([0.5142] , device=lowercase)
self.assertTrue(torch.allclose(outputs.loss , lowercase , atol=1e-4))
# verify the loss (`config.norm_pix_loss` = `False`)
a__: int = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=lowercase).to(
lowercase)
with torch.no_grad():
a__: Union[str, Any] = model(**lowercase)
a__: Optional[int] = torch.tensor(torch.tensor([0.6469]) , device=lowercase)
self.assertTrue(torch.allclose(outputs.loss , lowercase , atol=1e-4))
| 203
| 0
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum of the first n
    natural numbers and the sum of their squares."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
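# Worked check (illustrative): for n = 10 the sum of squares is 385 and the
# square of the sum is 55**2 == 3025, so solution(10) == 2640.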
| 91
|
'''simple docstring'''
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish DNI: 8 digits followed by a checksum letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
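# Worked example (illustrative): is_spain_national_id("12345678Z") is True
# because 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z".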
| 104
| 0
|
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : List[str] = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] ):
"""simple docstring"""
__UpperCAmelCase : List[str] = SwinvaModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
__UpperCAmelCase : Union[str, Any] = model(UpperCAmelCase_ )
__UpperCAmelCase : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCAmelCase : str = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] ):
"""simple docstring"""
__UpperCAmelCase : Dict = SwinvaForMaskedImageModeling(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
__UpperCAmelCase : Tuple = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCAmelCase : Dict = 1
__UpperCAmelCase : List[str] = SwinvaForMaskedImageModeling(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
__UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase : Dict = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = self.type_sequence_label_size
__UpperCAmelCase : Dict = SwinvaForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
__UpperCAmelCase : Dict = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : Dict = self.prepare_config_and_inputs()
__UpperCAmelCase : List[str] = config_and_inputs
__UpperCAmelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( snake_case__ ,snake_case__ ,unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = SwinvaModelTester(self )
__UpperCAmelCase : Union[str, Any] = ConfigTester(self , config_class=UpperCAmelCase_ , embed_dim=37 )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds" )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[Any] = model_class(UpperCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear ) )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(UpperCAmelCase_ )
__UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Dict = [*signature.parameters.keys()]
__UpperCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[int] = True
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : Any = False
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : str = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Dict = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
__UpperCAmelCase : Union[str, Any] = outputs.attentions
__UpperCAmelCase : Optional[int] = len(self.model_tester.depths )
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : List[str] = config.window_size**2
__UpperCAmelCase : Union[str, Any] = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
__UpperCAmelCase : Dict = outputs.attentions
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__UpperCAmelCase : List[str] = len(UpperCAmelCase_ )
# Check attention is always last and order is fine
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Dict = True
__UpperCAmelCase : Dict = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
if hasattr(self.model_tester , "num_hidden_states_types" ):
__UpperCAmelCase : int = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__UpperCAmelCase : Optional[Any] = 2
self.assertEqual(out_len + added_hidden_states , len(UpperCAmelCase_ ) )
__UpperCAmelCase : List[str] = outputs.attentions
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
__UpperCAmelCase : Dict = outputs.hidden_states
__UpperCAmelCase : Dict = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
# Swinv2 has a different seq_length
__UpperCAmelCase : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__UpperCAmelCase : str = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
__UpperCAmelCase : int = reshaped_hidden_states[0].shape
__UpperCAmelCase : Dict = (
reshaped_hidden_states[0].view(UpperCAmelCase_ , UpperCAmelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 354
|
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]
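
# Illustrative only -- the actual output depends on the bundled words.txt:
#   anagram("listen") might return ["enlist", "listen", "silent", "tinsel"]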
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 37
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
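# Map each submodule to the public names it provides; the actual imports are
# deferred until first attribute access via the _LazyModule assignment below.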
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 122
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
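
    # Mirrors the processor's aspect-ratio-preserving resize: the shorter image
    # side is scaled to size["shortest_edge"] and the other side follows
    # proportionally (the real processor additionally caps it at "longest_edge").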
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 122
| 1
|
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase (or PascalCase if use_pascal is True)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 367
|
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b
def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
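
# Worked example: find_mod_inverse(7, 26) == 15, since (7 * 15) % 26 == 1.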
| 178
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
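# Dummy-object pattern: instantiating the class (or calling its classmethods)
# raises an informative error naming the missing backends, instead of an
# obscure ImportError at import time.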
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 61
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )
    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)

        # Output shape (b, c, h, w)
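        # RegNet's stem plus four downsampling stages give a total stride of 32,
        # hence the image_size // 32 spatial size asserted below.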
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 193
| 0
|
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")
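
# Vigenère cipher: each letter is shifted by the alphabet position of the
# current key letter; non-letters pass through unchanged and do not advance
# the key index.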
def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
    main()
| 15
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
    import torch

if is_datasets_available():
    from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
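        # e.g. with the defaults: (2000 - 400) // (7 - 1) == 266, so successive
        # batch items grow by ~266 samples from min_seq_length up to max_seq_length.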
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 15
| 1
|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
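        # e.g. with seq_length=7 and attention_window=4: 7 + (4 - 7 % 4) % 4 == 8.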
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # head masking is not supported here, so nothing to check
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)
    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 204
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 204
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
| 364
|
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
    # fairseq:
    "wmt19-ru-en": {"length_penalty": 1.1},
    "wmt19-en-ru": {"length_penalty": 1.15},
    "wmt19-en-de": {"length_penalty": 1.0},
    "wmt19-de-en": {"length_penalty": 1.1},
    # allenai:
    "wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
    "wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
    "wmt16-en-de-12-1": {"length_penalty": 0.8},
    "wmt19-de-en-6-6-base": {"length_penalty": 0.6},
    "wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r'@@$', '', k), v) if k.endswith('@@') else (re.sub(r'$', '</w>', k), v) for k, v in d.items())
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f'{k}</w>']
        d2[k] = d[k]  # restore
    return d2
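

# Illustrative check of the rewrite above (a hypothetical toy dict, not a real
# fairseq vocab): word-internal '@@' markers are dropped, word-final entries gain
# '</w>', and the special tokens are restored unchanged at the end:
# >>> rewrite_dict_keys({'le@@': 5, 'tt@@': 6, 'er': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3})
# {'le': 5, 'tt': 6, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}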
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f'Writing results to {pytorch_dump_folder_path}')

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
    data_name_or_path = '.'
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f'using checkpoint {checkpoint_file}')
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs)

    args = vars(chkpt['args']['model'])

    src_lang = args['source_lang']
    tgt_lang = args['target_lang']

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f'dict.{src_lang}.txt')
    tgt_dict_file = os.path.join(fsmt_folder_path, f'dict.{tgt_lang}.txt')

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, 'vocab-src.json')
    print(f'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records')
    with open(src_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, 'vocab-tgt.json')
    print(f'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records')
    with open(tgt_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file'])
    for fn in ['bpecodes', 'code']:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding='utf-8') as fin:
        merges = fin.read()
    merges = re.sub(r' \d+$', '', merges, 0, re.M)  # remove frequency number
    print(f'Generating {merges_file}')
    with open(merges_file, 'w', encoding='utf-8') as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json')

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args['bpe'] == 'fastbpe', f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args['tokenizer'] == 'moses', f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        'architectures': ['FSMTForConditionalGeneration'],
        'model_type': 'fsmt',
        'activation_dropout': args['activation_dropout'],
        'activation_function': 'relu',
        'attention_dropout': args['attention_dropout'],
        'd_model': args['decoder_embed_dim'],
        'dropout': args['dropout'],
        'init_std': 0.02,
        'max_position_embeddings': args['max_source_positions'],
        'num_hidden_layers': args['encoder_layers'],
        'src_vocab_size': src_vocab_size,
        'tgt_vocab_size': tgt_vocab_size,
        'langs': [src_lang, tgt_lang],
        'encoder_attention_heads': args['encoder_attention_heads'],
        'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
        'encoder_layerdrop': args['encoder_layerdrop'],
        'encoder_layers': args['encoder_layers'],
        'decoder_attention_heads': args['decoder_attention_heads'],
        'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
        'decoder_layerdrop': args['decoder_layerdrop'],
        'decoder_layers': args['decoder_layers'],
        'bos_token_id': 0,
        'pad_token_id': 1,
        'eos_token_id': 2,
        'is_encoder_decoder': True,
        'scale_embedding': not args['no_scale_embedding'],
        'tie_word_embeddings': args['share_all_embeddings'],
    }

    # good hparam defaults to start with
    model_conf['num_beams'] = 5
    model_conf['early_stopping'] = False
    if model_dir in best_score_hparams and 'length_penalty' in best_score_hparams[model_dir]:
        model_conf['length_penalty'] = best_score_hparams[model_dir]['length_penalty']
    else:
        model_conf['length_penalty'] = 1.0

    print(f'Generating {fsmt_model_config_file}')
    with open(fsmt_model_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        'langs': [src_lang, tgt_lang],
        'model_max_length': 1024,
        'do_lower_case': do_lower_case,
    }
    print(f'Generating {fsmt_tokenizer_config_file}')
    with open(fsmt_tokenizer_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt['models'][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        'model.model',
        'model.encoder.version',
        'model.decoder.version',
        'model.encoder_embed_tokens.weight',
        'model.decoder_embed_tokens.weight',
        'model.encoder.embed_positions._float_tensor',
        'model.decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f'Generating {pytorch_weights_dump_path}')
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print('Conversion is done!')
    print('\nLast step is to upload the files to s3')
    print(f'cd {data_root}')
    print(f'transformers-cli upload {model_dir}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
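
# Example invocation (a sketch; the script file name and all paths below are
# placeholders, adjust them to your local checkout and checkpoint):
# python convert_original_stable_diffusion_to_diffusers.py \
#     --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#     --original_config_file ./v1-inference.yaml \
#     --extract_ema \
#     --dump_path ./stable-diffusion-v1-5-diffusers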
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias')
        v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias')
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias
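

# A minimal sketch of the bias layout produced above (dummy shapes, not real
# checkpoint values): the HF attention uses one fused qkv projection, so the k
# slot is padded with zeros because the original ViT encoder has no k bias.
# >>> q = torch.ones(4); v = torch.full((4,), 2.0)
# >>> torch.cat((q, torch.zeros_like(v), v))
# tensor([1., 1., 1., 1., 0., 0., 0., 0., 2., 2., 2., 2.])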
def get_blip2_config(model_name):
    image_size = 364 if 'coco' in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if 't5-xl' in model_name:
        text_config = T5Config.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif 't5-xxl' in model_name:
        text_config = T5Config.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif 'vicuna-7b' in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf', vocab_size=32_001).to_dict()
    elif 'vicuna-13b' in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf', vocab_size=32_001).to_dict()
    else:
        raise ValueError('Model name not supported')
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30_523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)
    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', truncation_side='left')
    qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'})
    if 't5' in model_name:
        tokenizer = T5TokenizerFast.from_pretrained('google/flan-t5-xl', truncation_side='left')
    elif 'vicuna' in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            'huggyllama/llama-7b', truncation_side='left', bos_token='</s>', unk_token='</s>')
        tokenizer.add_special_tokens({'pad_token': '[PAD]'})
    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()
    model_name_to_original = {
        'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'),
        'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'),
        'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'),
        'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'),
    }
    name, type = model_name_to_original[model_name]
    # load original model
    print('Loading original model...')
    lavis_device = 'cuda:1' if torch.cuda.is_available() else 'cpu'
    hf_model_device = 'cuda:2' if torch.cuda.is_available() else 'cpu'
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device)
    original_model.eval()
    print('Done!')
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('Qformer.bert'):
            key = key.replace('Qformer.bert', 'qformer')
        if 'attention.self' in key:
            key = key.replace('self', 'attention')
        if 'llm_proj' in key:
            key = key.replace('llm_proj', 'language_projection')
        if 't5_proj' in key:
            key = key.replace('t5_proj', 'language_projection')
        if key.startswith('llm_model'):
            key = key.replace('llm_model', 'language_model')
        if key.startswith('t5'):
            key = key.replace('t5', 'language')
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)
    image = load_demo_image()
    prompt = 'What is unusual about this image?'
    # create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = InstructBlipProcessor(
        image_processor=image_processor, tokenizer=tokenizer, qformer_tokenizer=qformer_tokenizer)
    inputs = processor(images=image, text=prompt, return_tensors='pt').to(hf_model_device)
    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors['eval'](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)
    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if 'vicuna' in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']}).logits
            label_input_ids = tokenizer('\n', return_tensors='pt').input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits
    print('First values of original logits:', original_logits[0, :3, :3])
    print('First values of HF logits:', logits[0, :3, :3])
    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if 'vicuna' in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print('Looks ok!')
    print('Generating with original model...')
    original_outputs = original_model.generate({'image': original_pixel_values, 'prompt': prompt}, num_beams=5)
    # important: we need to cast the weights of the HF model to the appropriate type
    print('Generating with HF model...')
    outputs = hf_model.generate(
        **inputs, do_sample=False, num_beams=5, max_length=256, min_length=1, top_p=0.9,
        repetition_penalty=1.5, length_penalty=1.0, temperature=1)
    if 'vicuna' in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print('Original generation:', original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print('HF generation:', output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(f'Salesforce/{model_name}')
        hf_model.push_to_hub(f'Salesforce/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import inspect
import unittest
from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV2ImageProcessor
class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'tf_padding'))
        self.parent.assertTrue(hasattr(config, 'depth_multiplier'))
class MobileNetV2ModelTester:
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, tf_padding=True, hidden_act='relu6', last_hidden_size=1_280, classifier_dropout_prob=0.1, initializer_range=0.02, use_labels=True, is_training=True, num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MobileNetV2Model,
            'image-classification': MobileNetV2ForImageClassification,
            'image-segmentation': MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileNetV2 does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MobileNetV2 does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MobileNetV2 does not output attentions')
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224') if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_001))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
        model = model.to(torch_device)
        image_processor = MobileNetV2ImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(self, vocab_size=30_522, block_sizes=[4, 4, 4], block_repeats=None, num_decoder_layers=2, d_model=768, n_head=12, d_head=64, d_inner=3072, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, initializer_range=0.1, initializer_std=None, layer_norm_eps=1e-9, pooling_type="mean", attention_type="relative_shift", separate_cls=True, truncate_seq=True, pool_q_only=True, **kwargs):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.')

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.')
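

# A minimal usage sketch (values chosen for illustration): `num_hidden_layers`
# is derived from `block_sizes`, which is why it can only be set indirectly.
# >>> config = FunnelConfig(block_sizes=[2, 2])
# >>> config.num_hidden_layers, config.num_blocks
# (4, 2)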
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ['CLIPEncoderLayer']

    def __init__(self, config):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img['special_scores'][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img['special_scores'][concept_idx] > 0:
                    result_img['special_care'].append({concept_idx, result_img['special_scores'][concept_idx]})
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img['concept_scores'][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img['concept_scores'][concept_idx] > 0:
                    result_img['bad_concepts'].append(concept_idx)
            result.append(result_img)
        has_nsfw_concepts = [len(res['bad_concepts']) > 0 for res in result]
        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
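

# A small numeric sketch of the thresholding above (made-up scores, not the real
# concept embeddings): an image is flagged as soon as any cosine score exceeds
# its per-concept threshold, and a triggered special-care concept raises all
# following scores by the 0.01 adjustment.
# >>> cos = torch.tensor([[0.20, 0.05]]); thresholds = torch.tensor([0.18, 0.10])
# >>> torch.any((cos - thresholds) > 0, dim=1)
# tensor([True])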
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
_a : List[str] = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
    url = f'https://www.google.com/search?q={query}&num=100'
    res = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def counting_sort(collection):
    """Pure implementation of counting sort algorithm in Python.

    >>> counting_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> counting_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
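

# A worked trace of the prefix-sum step above on [4, 1, 3] (illustrative only):
# raw counts over the range 1..4 are [1, 0, 1, 1]; after the running sum they
# become [1, 1, 2, 3]. The element 3 has offset 3 - coll_min = 2, so it lands at
# output index counting_arr[2] - 1 = 1, giving the sorted result [1, 3, 4].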
def counting_sort_string(string):
    """
    >>> counting_sort_string("thisisthestring")
    'eghhiiinrsssttt'
    """
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(counting_sort(unsorted))
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims)

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
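

# A minimal sketch of the masked mean-pooling in `forward` (dummy tensors, not
# real tokenizer output): padded positions are zeroed out before averaging, so
# only real tokens contribute to the sentence embedding.
# >>> embs = torch.ones(1, 3, 2); mask = torch.tensor([[1.0, 1.0, 0.0]])
# >>> (embs * mask.unsqueeze(2)).sum(dim=1) / mask.sum(dim=1)[:, None]
# tensor([[1., 1.]])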
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BERT tokenizer, backed by HuggingFace's `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
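

# A quick illustration of the segment-id layout produced above (hypothetical token
# ids, chosen only to show the shape): sequence A plus [CLS]/[SEP] maps to 0s and
# sequence B plus its trailing [SEP] maps to 1s.
# >>> tok = BertTokenizerFast.from_pretrained('bert-base-uncased')
# >>> tok.create_token_type_ids_from_sequences([7, 8], [9])
# [0, 0, 0, 0, 1, 1]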
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser('env')
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__
        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = 'not installed'
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__
        accelerate_version = 'not installed'
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__
        xformers_version = 'not installed'
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__
        info = {
            '`diffusers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'PyTorch version (GPU?)': f'{pt_version} ({pt_cuda_available})',
            'Huggingface_hub version': hub_version,
            'Transformers version': transformers_version,
            'Accelerate version': accelerate_version,
            'xFormers version': xformers_version,
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }
        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
        print(self.format_dict(info))
        return info

    @staticmethod
    def format_dict(d):
        return '\n'.join([f'- {prop}: {val}' for prop, val in d.items()]) + '\n'
| 293
|
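# --- Illustrative sketch (added, not from the original file): the same
# "collect versions, format as a bullet list" pattern as the CLI command above,
# stripped of the argparse plumbing so it runs on its own.
import platform

def format_dict(d: dict) -> str:
    return "\n".join(f"- {prop}: {val}" for prop, val in d.items()) + "\n"

info = {
    "Platform": platform.platform(),
    "Python version": platform.python_version(),
}
print(format_dict(info))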
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
    _import_structure["modeling_data2vec_text"] = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
    _import_structure["modeling_data2vec_vision"] = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 293
| 1
|
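# --- Illustrative sketch (added): the idea behind `_LazyModule` above, reduced
# to a module-level __getattr__ (PEP 562). Importing the package only records
# *names*; the heavy submodule import is deferred until first attribute access.
# The toy `_import_structure` here uses the stdlib so the sketch is runnable.
import importlib

_import_structure = {"json": ["dumps"]}

def __getattr__(name):
    for module_name, names in _import_structure.items():
        if name in names:
            module = importlib.import_module(module_name)
            return getattr(module, name)
    raise AttributeError(name)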
'''simple docstring'''
def merge_sort(collection: list) -> list:
    """Sort by repeatedly extracting the minimum and maximum of the remainder."""
    start, end = [], []
    while len(collection) > 1:
        mini, maxi = min(collection), max(collection)
        start.append(mini)
        end.append(maxi)
        collection.remove(mini)
        collection.remove(maxi)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 111
|
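# --- Worked example (added): despite its name, the sort above is not classic
# merge sort -- each pass peels the minimum onto `start` and the maximum onto
# `end`, placing two elements per iteration. A self-contained copy for tracing:
#   [3, 1, 2]: mini=1, maxi=3 -> start=[1], end=[3], collection=[2] -> done.
def minmax_sort(collection: list) -> list:
    start, end = [], []
    while len(collection) > 1:
        mini, maxi = min(collection), max(collection)
        start.append(mini)
        end.append(maxi)
        collection.remove(mini)
        collection.remove(maxi)
    end.reverse()
    return start + collection + end

assert minmax_sort([3, 1, 2]) == [1, 2, 3]        # odd length: middle element kept
assert minmax_sort([4, 2, 3, 1]) == [1, 2, 3, 4]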
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    """Gnome sort: step forward while in order, swap and step back otherwise."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 111
| 1
|
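# --- Worked trace (added): gnome sort on [3, 1, 2], with i the current position:
#   i=1: 3 > 1 -> swap -> [1, 3, 2], step back (reset to i=1)
#   i=1: 1 <= 3 -> i=2
#   i=2: 3 > 2 -> swap -> [1, 2, 3], i=1
#   i=1: 1 <= 2 -> i=2; i=2: 2 <= 3 -> i=3 -> done
# Worst case is O(n^2) like insertion sort; an already-sorted input is O(n).
import random

data = random.sample(range(100), 10)
# Sanity check against the built-in (assumes gnome_sort above is importable):
# assert gnome_sort(data[:]) == sorted(data)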
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 299
|
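# --- Illustrative sketch (added, not part of the original file): the
# composite-config pattern used above, reduced to plain dataclasses. A
# top-level config owns one sub-config per modality, builds each from a plain
# dict, and re-nests them in to_dict(). Class and field names are made up.
from dataclasses import dataclass, field, asdict

@dataclass
class TextConfig:
    vocab_size: int = 50244
    hidden_size: int = 768

@dataclass
class VisionConfig:
    hidden_size: int = 768
    num_hidden_layers: int = 12

@dataclass
class ComposedConfig:
    text_config: TextConfig = field(default_factory=TextConfig)
    vision_config: VisionConfig = field(default_factory=VisionConfig)

    def to_dict(self):
        return asdict(self)  # recursively nests the sub-configs

cfg = ComposedConfig(text_config=TextConfig(vocab_size=1000))
assert cfg.to_dict()["text_config"]["vocab_size"] == 1000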
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    """Iterative digit sum."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursive digit sum."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Digit sum via string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three digit-sum implementations against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 37
| 0
|
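# --- Worked example (added): all three digit-sum variants above agree.
# For 9045: 9 + 0 + 4 + 5 = 18; iteratively, 9045 % 10 = 5, then 904 % 10 = 4,
# 90 % 10 = 0, 9 % 10 = 9. The compact form simply maps int() over str(abs(n)).
assert sum(int(c) for c in str(abs(-9045))) == 18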
import numpy as np
import datasets
lowercase_ = """
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
lowercase_ = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
lowercase_ = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
| 269
|
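# --- Worked example (added): the Mahalanobis computation above, outside the
# datasets.Metric wrapper. With reference points [[0, 1], [1, 0]] the mean of
# all entries is 0.5 and the covariance [[0.5, -0.5], [-0.5, 0.5]] is singular,
# so the pseudo-inverse branch is taken and the distance for [0, 1] is 0.5,
# matching the docstring example.
import numpy as np

X = np.array([[0, 1]])
ref = np.array([[0, 1], [1, 0]])
X_minus_mu = X - np.mean(ref)
cov = np.cov(ref.T)
inv_covmat = np.linalg.pinv(cov)  # cov is singular here
mahal = np.dot(np.dot(X_minus_mu, inv_covmat), X_minus_mu.T).diagonal()
assert np.allclose(mahal, [0.5])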
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Find the largest product of thirteen adjacent digits in `n`."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 269
| 1
|
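# --- Illustrative sketch (added): the brute-force scan above recomputes a
# 13-digit product at every offset. A common refinement slides a window and
# skips any window containing a zero; shown here on a short made-up string
# with a 4-digit window (window size and data are assumptions for the example).
from functools import reduce

def max_window_product(digits: str, k: int) -> int:
    best = 0
    for i in range(len(digits) - k + 1):
        window = digits[i : i + k]
        if "0" in window:
            continue  # any window containing 0 has product 0
        best = max(best, reduce(lambda acc, ch: acc * int(ch), window, 1))
    return best

assert max_window_product("123406789", 4) == 6 * 7 * 8 * 9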
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 72
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast SqueezeBERT tokenizer backed by HuggingFace *tokenizers*."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 178
| 0
|
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    """Return the sum of the even Fibonacci numbers that do not exceed `n`."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 365
|
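# --- Worked example (added): the even Fibonacci numbers are every third
# Fibonacci number (2, 8, 34, 144, ...). A generator-based check of the
# function above; 4,613,732 is the well-known sum below four million.
def even_fibs_upto(n):
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            yield b
        a, b = b, a + b

assert list(even_fibs_upto(100)) == [2, 8, 34]
assert sum(even_fibs_upto(4_000_000)) == 4_613_732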
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Returns the largest divisor of `seq_length` below `window_size` and the block count."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 161
| 0
|
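# --- Worked example (added): how the `attention_types` shorthand above expands
# into one entry per layer. [[["global", "local"], 12]] means "repeat the
# pattern global,local 12 times", i.e. 24 layer types for the 24 layers.
def expand(attention_types):
    attentions = []
    for item in attention_types:
        for _ in range(item[1]):
            attentions.extend(item[0])
    return attentions

layers = expand([[["global", "local"], 12]])
assert len(layers) == 24
assert layers[:4] == ["global", "local", "global", "local"]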
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
| 15
|
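# --- Worked example (added): a Vigenère round trip, re-implemented inline so
# the example is self-contained. Key "LEMON" on plaintext "ATTACKATDAWN"
# gives ciphertext "LXFOPVEFRNHR" (the classic textbook example).
ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

def shift(message: str, key: str, sign: int) -> str:
    out, k = [], 0
    for ch in message:
        num = (ALPHABET.find(ch) + sign * ALPHABET.find(key[k])) % 26
        out.append(ALPHABET[num])
        k = (k + 1) % len(key)
    return "".join(out)

assert shift("ATTACKATDAWN", "LEMON", +1) == "LXFOPVEFRNHR"
assert shift("LXFOPVEFRNHR", "LEMON", -1) == "ATTACKATDAWN"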
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    """CamemBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}

        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 15
| 1
|
def solution(limit: int = 28123) -> int:
    """Sum of all positive integers that cannot be written as the sum of two abundant numbers."""
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
| 370
|
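# --- Worked example (added): 12 is the smallest abundant number -- its proper
# divisors 1, 2, 3, 4, 6 sum to 16 > 12 -- so 24 is the smallest integer that
# can be written as a sum of two abundant numbers. Quick check of the idea:
def sum_proper_divisors(n: int) -> int:
    return sum(d for d in range(1, n) if n % d == 0)

assert sum_proper_divisors(12) == 16   # abundant
assert sum_proper_divisors(28) == 28   # perfect, not abundant
assert all(sum_proper_divisors(n) <= n for n in range(1, 12))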
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic


def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
f'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
f'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original DETR weights into the HuggingFace structure."""
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 177
| 0
|
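# --- Illustrative sketch (added): why the conversion above slices in_proj
# tensors into thirds. PyTorch's nn.MultiheadAttention stores the query, key
# and value projections stacked in one (3*d, d) matrix; HF DETR keeps three
# separate (d, d) linear layers, so the converter splits along dim 0.
import torch

d = 256  # DETR hidden size, matching the :256 / 256:512 / -256: slices above
in_proj_weight = torch.randn(3 * d, d)
q_w = in_proj_weight[:d, :]
k_w = in_proj_weight[d : 2 * d, :]
v_w = in_proj_weight[-d:, :]
assert q_w.shape == k_w.shape == v_w.shape == (d, d)
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)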
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 107
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 147
| 0
|
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float tensor of the given shape, returned as a nested list."""
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors='np').input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='np')
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt')
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors='pt').input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 267
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp) -> bool:
    """Checks whether `cp` is the codepoint of a CJK character."""
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
def is_chinese(word) -> int:
    """Returns 1 if every character in `word` is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens) -> list:
    """Collects the multi-character, fully-Chinese tokens from `tokens`."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set) -> list:
    """Prefixes '##' to every non-initial character of each matched whole Chinese word."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match = min(end - start, max_word_len)
            for i in range(max_match, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
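
# Editor's sketch (illustration only, not part of the original script): how add_sub_symbol
# rewrites characters of a matched whole word. With the hypothetical segmented words below,
# every non-initial character of a matched word receives the "##" continuation prefix.
def _add_sub_symbol_sketch():
    tokens = ["欢", "迎", "使", "用"]
    print(add_sub_symbol(tokens, {"欢迎", "使用"}))  # ['欢', '##迎', '使', '##用']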
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer) -> list:
    """Builds, per line, the positions of '##'-continuation Chinese subwords."""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=['cws']).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords that start with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """Reads input lines, computes Chinese whole-word refs, writes them as JSON lines."""
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args)
| 267
| 1
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        prediction_scores = model(inputs)['logits']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size])

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base')
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)
        print(output[:, :3, :3])
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]])
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ])
        emb2 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb2([2, 16, 512])
        weights = emb2.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer)
        expected_query = tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
        expected_key = tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key, atol=self.tolerance)
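
# Editor's sketch (not the TF implementation exercised above): the rotary trick in plain
# numpy. Adjacent channel pairs are rotated by a position-dependent angle, i.e.
# x' = x * cos + rotate_every_two(x) * sin, which is what the test above checks.
import numpy as np


def _rotate_every_two(x):
    x1, x2 = x[..., ::2], x[..., 1::2]
    # interleave (-x2, x1) back into the original channel layout
    return np.stack([-x2, x1], axis=-1).reshape(x.shape)


def _apply_rotary_sketch(x, sin, cos):
    # sin/cos carry one value per channel pair, so repeat each across its pair
    return x * np.repeat(cos, 2, axis=-1) + _rotate_every_two(x) * np.repeat(sin, 2, axis=-1)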
| 285
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['input_features', 'is_longer']

    def __init__(self, feature_size=64, sampling_rate=48000, hop_length=480, max_length_s=10, fft_window_size=1024, padding_value=0.0, return_attention_mask=False, frequency_min=0, frequency_max=14000, top_db=None, truncation="fusion", padding="repeatpad", **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs)
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale='htk')
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney')
    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        output['feature_extractor_type'] = self.__class__.__name__
        if 'mel_filters' in output:
            del output['mel_filters']
        if 'mel_filters_slaney' in output:
            del output['mel_filters_slaney']
        return output

    def _np_extract_fbank_features(self, waveform, mel_filters=None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, 'hann'), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel='dB')
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode='bilinear', align_corners=False)
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform, max_length, truncation, padding):
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f'data_truncating {truncation} not implemented')
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode='constant', constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer
    def __call__(self, raw_speech, truncation=None, padding=None, max_length=None, sampling_rate=None, return_tensors=None, **kwargs) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    f' was sampled with {self.sampling_rate} and not {sampling_rate}.')
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'input_features': input_mel, 'is_longer': is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
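
# Editor's usage sketch (illustration only, no checkpoint needed): feeding a 12 s clip at
# 48 kHz through the extractor above with "fusion" truncation should yield a 4-channel
# stack of log-mel features plus an is_longer flag. The helper is defined but never called.
def _clap_extractor_sketch():
    audio = np.random.randn(48_000 * 12)  # hypothetical 12-second mono clip
    extractor = ClapFeatureExtractor()
    features = extractor(audio, sampling_rate=48_000, truncation="fusion", return_tensors="np")
    print(features["input_features"].shape)  # (1, 4, chunk_frames, 64): shrunk full mel + 3 random chunks
    print(features["is_longer"])  # [[True]] since the clip exceeds max_length_s = 10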
| 285
| 1
|
import re
import string
import numpy as np
import datasets
_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_CITATION = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
    def _compute(self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, '', x) for x in predictions])
                references = np.array([re.sub(s, '', x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)
        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('', '', string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        if ignore_numbers:
            repl_table = string.digits.maketrans('', '', string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 307
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 307
| 1
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'FlavaImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""", FutureWarning, )
            feature_extractor = kwargs.pop("""feature_extractor""")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=False, max_length=None, stride=0, pad_to_multiple_of=None, return_image_mask=None, return_codebook_pixels=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""")
        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs, )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", FutureWarning, )
        return self.image_processor
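
# Editor's usage sketch (illustration only; loading "facebook/flava-full" is an assumption
# and requires network access, so the call is kept inside an unused helper).
def _flava_processor_sketch():
    from PIL import Image

    processor = FlavaProcessor.from_pretrained("facebook/flava-full")
    inputs = processor(images=Image.new("RGB", (224, 224)), text=["a photo"], return_tensors="pt")
    # a joint call returns both tokenizer fields (input_ids, ...) and image fields (pixel_values, ...)
    print(sorted(inputs.keys()))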
| 46
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, sp_model_kwargs=None, **kwargs) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f'additional_special_tokens should be of type {type(list)}, but is'
                    f' {type(additional_special_tokens)}')
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'<unk_{i}>' for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    """Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
                    f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.')
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'<unk_{i}>' for i in range(2, self.offset)]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
# add special tokens to encoder dict
        self.encoder = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
        self.decoder = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def num_special_tokens_to_add(self, pair=False) -> int:
        """Just EOS."""
        return 1

    def _special_token_mask(self, seq) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
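
# Editor's sketch (illustration only): the id layout implied by `offset` above. Ids 0/1
# are pad/eos, 2/3 the mask tokens, and the remaining reserved slots run up to offset - 1,
# so a raw SentencePiece piece id p surfaces as p + offset. Loading the checkpoint needs
# network access, so the helper is never called.
def _pegasus_offset_sketch():
    tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
    ids = tok("Summarize this.").input_ids
    print(ids[-1] == tok.eos_token_id)  # True: eos (id 1) is appended by build_inputs_with_special_tokens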
| 46
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 368
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = """naver-clova-ix/donut-base-finetuned-docvqa"""
    description = (
        """This is a tool that answers a question about a document (pdf). It takes an input named `document` which """
        """should be the document containing the information, as well as a `question` that is the question about the """
        """document. It returns a text that contains the answer to the question."""
    )
    name = """document_qa"""
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["""image""", """text"""]
    outputs = ["""text"""]
    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''')
        super().__init__(*args, **kwargs)
    def encode(self, document, question):
        task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        prompt = task_prompt.replace('''{user_input}''', question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors='''pt''').input_ids
        pixel_values = self.pre_processor(document, return_tensors='''pt''').pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward(self, inputs):
        return self.model.generate(
            inputs['''pixel_values'''].to(self.device), decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True, ).sequences
    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, '''''')
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, '''''')
        sequence = re.sub(R'''<.*?>''', '''''', sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
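
# Editor's usage sketch (illustration only; the image path is hypothetical and the
# checkpoint download requires network access, so the helper is never called).
def _document_qa_sketch():
    from PIL import Image

    tool = DocumentQuestionAnsweringTool()
    answer = tool(document=Image.open("invoice.png"), question="What is the total amount?")
    print(answer)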
| 252
| 0
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)
        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)
        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn, map_numpy=True).items()}, {k: v.tolist() for k, v in expected_map_nested_sn_int.items()}, )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn, map_numpy=True, num_proc=num_proc).items()}, {k: v.tolist() for k, v in expected_map_nested_sn_int.items()}, )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_pytorch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data" , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it is yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
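# For intuition, a minimal pure-Python analogue of what `map_nested` (tested
# above) does for dicts and lists. This is an illustrative sketch only: the
# real helper also handles tuples, numpy arrays, progress bars and
# multiprocessing, and the helper name here is invented for the example.
def _map_nested_sketch(fn, data):
    if isinstance(data, dict):
        return {k: _map_nested_sketch(fn, v) for k, v in data.items()}
    if isinstance(data, list):
        return [_map_nested_sketch(fn, v) for v in data]
    return fn(data)


assert _map_nested_sketch(lambda x: x + 1, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}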
| 52
|
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
__UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
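# Example invocation (illustrative only; the script filename and all paths are
# placeholders, not part of the original file):
#   python convert_yoso_checkpoint.py \
#       --pytorch_model_path ./yoso.ckpt \
#       --config_file ./yoso_config.json \
#       --pytorch_dump_path ./yoso-hf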
| 182
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
"tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
"CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
"CanineForMultipleChoice",
"CanineForQuestionAnswering",
"CanineForSequenceClassification",
"CanineForTokenClassification",
"CanineLayer",
"CanineModel",
"CaninePreTrainedModel",
"load_tf_weights_in_canine",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
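# Note (illustrative): with the `_LazyModule` registered in `sys.modules` above,
# importing this package stays cheap; torch-backed symbols such as `CanineModel`
# are only materialized on first attribute access.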
| 369
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None,
    local_files_only: bool = False, **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info("Could not locate the feature extractor configuration file, will try to use the model config instead.")
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    r"""
    A generic feature extractor class that is instantiated with
    `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)`.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
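# Typical usage (the checkpoint id is illustrative; any hub repo or local folder
# containing a `preprocessor_config.json` works):
#   from transformers import AutoFeatureExtractor
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")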
| 26
| 0
|
def max_product_subarray(numbers: list) -> int:
    """Return the maximum product of any contiguous subarray of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
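if __name__ == "__main__":
    # Worked examples (added for illustration): the best subarray of
    # [2, 3, -2, 4] is [2, 3] with product 6; in [-2, 0, -1] the zero caps
    # the best achievable product at 0.
    assert max_product_subarray([2, 3, -2, 4]) == 6
    assert max_product_subarray([-2, 0, -1]) == 0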
| 111
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny", "roberta", checkpoint_path, precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion, fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
__UpperCAmelCase : Tuple = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
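# Example invocation (illustrative only; the script filename and all paths are
# placeholders, not part of the original file):
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path ./clap.ckpt \
#       --pytorch_dump_folder_path ./clap-hf \
#       --enable_fusion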
| 111
| 1
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler

    phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
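# Smoke test (illustrative sketch, not part of the original module): any object
# with a `process(sample) -> float` method satisfies FilterType; an identity
# "filter" should plot a flat 0 dB magnitude and zero phase response.
if __name__ == "__main__":

    class Passthrough:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(Passthrough(), 48000)
    show_phase_response(Passthrough(), 48000)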
| 365
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """Mobius function of n: 0 if n is divisible by a square, otherwise
    (-1)**k where k is the number of prime factors of n."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
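# Worked examples (added for illustration): 4 = 2*2 is divisible by a square,
# 6 = 2*3 has an even number of prime factors, 30 = 2*3*5 an odd number:
#   mobius(4)  == 0
#   mobius(6)  == 1
#   mobius(30) == -1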
| 136
| 0
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
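# Worked example (added for illustration): a 35 mH inductor with an inductive
# reactance of 50 Ω corresponds to a frequency f = X_L / (2*pi*L)
# = 50 / (2*pi*0.035) ≈ 227.364 Hz:
#   ind_reactance(35e-3, 0, 50)  ->  {'frequency': 227.364...}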
| 269
|
"""simple docstring"""
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two sorted runs input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Iterative (bottom-up) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
| 269
| 1
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : str = 0
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(a_ ,a_ )
def _snake_case ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : Optional[int] = Path(a_ ) / """preprocessor_config.json"""
_UpperCAmelCase : int = Path(a_ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(a_ ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(a_ ,"""w""" ) )
_UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ ,a_ )
def _snake_case ( self ) -> Union[str, Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : List[str] = Path(a_ ) / """preprocessor_config.json"""
_UpperCAmelCase : List[Any] = Path(a_ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} ,open(a_ ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(a_ ,"""w""" ) )
_UpperCAmelCase : int = AutoImageProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ ,a_ )
def _snake_case ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : Any = CLIPConfig()
# Create a dummy config file with image_proceesor_type
_UpperCAmelCase : str = Path(a_ ) / """preprocessor_config.json"""
_UpperCAmelCase : Optional[int] = Path(a_ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(a_ ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(a_ ,"""w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_UpperCAmelCase : List[str] = AutoImageProcessor.from_pretrained(a_ ).to_dict()
config_dict.pop("""image_processor_type""" )
_UpperCAmelCase : List[str] = CLIPImageProcessor(**a_ )
# save in new folder
model_config.save_pretrained(a_ )
config.save_pretrained(a_ )
_UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained(a_ )
# make sure private variable is not incorrectly saved
_UpperCAmelCase : Dict = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(a_ ,a_ )
def _snake_case ( self ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : str = Path(a_ ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(a_ ,"""w""" ) ,)
_UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ ,a_ )
def _snake_case ( self ) -> List[Any]:
with self.assertRaisesRegex(
a_ ,"""clip-base is not a local folder and is not a valid model identifier""" ):
_UpperCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained("""clip-base""" )
def _snake_case ( self ) -> List[str]:
with self.assertRaisesRegex(
a_ ,r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
_UpperCAmelCase : Tuple = AutoImageProcessor.from_pretrained(a_ ,revision="""aaaaaa""" )
def _snake_case ( self ) -> Optional[Any]:
with self.assertRaisesRegex(
a_ ,"""hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" ,):
_UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def _snake_case ( self ) -> Optional[Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a_ ):
_UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a_ ):
_UpperCAmelCase : List[str] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=a_ )
_UpperCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=a_ )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(a_ )
_UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(a_ ,trust_remote_code=a_ )
self.assertEqual(reloaded_image_processor.__class__.__name__ ,"""NewImageProcessor""" )
def _snake_case ( self ) -> Any:
try:
AutoConfig.register("""custom""" ,a_ )
AutoImageProcessor.register(a_ ,a_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a_ ):
AutoImageProcessor.register(a_ ,a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : Optional[int] = Path(a_ ) / """preprocessor_config.json"""
_UpperCAmelCase : Dict = Path(a_ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} ,open(a_ ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(a_ ,"""w""" ) )
_UpperCAmelCase : Union[str, Any] = CustomImageProcessor.from_pretrained(a_ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(a_ )
_UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ ,a_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _snake_case ( self ) -> str:
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = True
try:
AutoConfig.register("""custom""" ,a_ )
AutoImageProcessor.register(a_ ,a_ )
# If remote code is not set, the default is to use local
_UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_UpperCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=a_ )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_UpperCAmelCase : Tuple = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=a_ )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(not hasattr(a_ ,"""is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 349
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : List[str] = 1
_UpperCAmelCase : List[str] = 3
_UpperCAmelCase : Union[str, Any] = (32, 32)
_UpperCAmelCase : str = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(a_ )
return image
@property
def _snake_case ( self ) -> List[Any]:
torch.manual_seed(0 )
_UpperCAmelCase : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
def _snake_case ( self ) -> Optional[int]:
torch.manual_seed(0 )
_UpperCAmelCase : str = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def _snake_case ( self ) -> Dict:
torch.manual_seed(0 )
_UpperCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
return CLIPTextModel(a_ )
@property
def _snake_case ( self ) -> Union[str, Any]:
def extract(*a_ ,**a_ ):
class lowercase :
"""simple docstring"""
def __init__( self ) -> Any:
_UpperCAmelCase : str = torch.ones([0] )
def _snake_case ( self ,a_ ) -> Any:
self.pixel_values.to(a_ )
return self
return Out()
return extract
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Union[str, Any] = self.dummy_cond_unet
_UpperCAmelCase : int = DDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=a_ ,set_alpha_to_one=a_ ,)
_UpperCAmelCase : Optional[int] = self.dummy_vae
_UpperCAmelCase : Optional[int] = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : int = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Union[str, Any] = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Optional[int] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : int = output.images
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : str = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Tuple = self.dummy_cond_unet
_UpperCAmelCase : Optional[int] = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : int = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : str = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : str = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : int = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : Any = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : List[Any] = sd_pipe([prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : List[Any] = torch.Generator(device=a_ ).manual_seed(0 )
_UpperCAmelCase : Any = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=a_ ,)[0]
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : Union[str, Any] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Optional[int] = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=a_ )
assert isinstance(a_ ,a_ )
assert isinstance(pipe.scheduler ,a_ )
assert pipe.safety_checker is None
_UpperCAmelCase : Dict = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained(a_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_UpperCAmelCase : Union[str, Any] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def _snake_case ( self ) -> str:
_UpperCAmelCase : Optional[int] = self.dummy_cond_unet
_UpperCAmelCase : str = PNDMScheduler(skip_prk_steps=a_ )
_UpperCAmelCase : List[str] = self.dummy_vae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
_UpperCAmelCase : str = unet.half()
_UpperCAmelCase : List[str] = vae.half()
_UpperCAmelCase : Dict = bert.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : Dict = StableDiffusionPipeline(
unet=a_ ,scheduler=a_ ,vae=a_ ,text_encoder=a_ ,tokenizer=a_ ,safety_checker=a_ ,feature_extractor=self.dummy_extractor ,)
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : str = """A painting of a squirrel eating a burger"""
_UpperCAmelCase : int = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> str:
_UpperCAmelCase : List[str] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Dict = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : int = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : List[Any] = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
_UpperCAmelCase : Any = 4_003_660_346
_UpperCAmelCase : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : str = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : str = output.images
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
_UpperCAmelCase : List[str] = torch.manual_seed(a_ )
_UpperCAmelCase : Optional[Any] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[str] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> int:
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=a_ )
_UpperCAmelCase : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_UpperCAmelCase : Union[str, Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Any = """padme amidala taking a bath artwork, safe for work, no nudity"""
_UpperCAmelCase : Optional[Any] = 2_734_971_755
_UpperCAmelCase : Optional[int] = 7
_UpperCAmelCase : int = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : Optional[int] = output.images
_UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[int] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
_UpperCAmelCase : Optional[int] = torch.manual_seed(a_ )
_UpperCAmelCase : int = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Union[str, Any] = output.images
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
_UpperCAmelCase : List[Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
_UpperCAmelCase : List[str] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_UpperCAmelCase : Optional[int] = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
_UpperCAmelCase : Dict = 1_044_355_234
_UpperCAmelCase : int = 12
_UpperCAmelCase : Optional[Any] = torch.manual_seed(a_ )
_UpperCAmelCase : List[str] = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
_UpperCAmelCase : Tuple = torch.manual_seed(a_ )
_UpperCAmelCase : Dict = sd_pipe(
[prompt] ,generator=a_ ,guidance_scale=a_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2_000 ,sld_warmup_steps=7 ,sld_threshold=0.025 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_UpperCAmelCase : Optional[Any] = output.images
_UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 349
| 1
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 330
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 330
| 1
|
class EditDistance:
    """Compute the Levenshtein (edit) distance with top-down and bottom-up DP."""

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

        return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 362
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 270
| 0
|
import random


def _partition(data: list, pivot) -> tuple:
    """Three-way partition `data` into (less, equal, greater) lists relative to `pivot`."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the element that would sit at position `index` in sorted(items)."""
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
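if __name__ == "__main__":
    # Worked example (added for illustration): in sorted([5, 1, 4, 2, 3])
    # = [1, 2, 3, 4, 5], index 2 holds the median.
    assert quick_select([5, 1, 4, 2, 3], 2) == 3
    assert quick_select([5, 1, 4, 2, 3], 0) == 1
    assert quick_select([5, 1, 4, 2, 3], 9) is None  # out-of-range index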
| 56
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/nllb-large-en-ro""": 1024,
"""facebook/nllb-200-distilled-600M""": 1024,
}
# fmt: off
_UpperCAmelCase : Any =["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class NllbTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else '''eng_Latn'''
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang( self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ) -> BatchEncoding:
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['''forced_bos_token_id'''] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self , src_texts , src_lang = "eng_Latn" , tgt_texts = None , tgt_lang = "fra_Latn" , **kwargs , ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ):
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens( self , lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
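# A minimal stand-alone sketch of the special-token convention implemented above,
# using illustrative ids only (none of these ids come from the real vocabulary):
# the default mode prefixes the language code and suffixes EOS, while legacy mode
# appends both after the sequence.
if __name__ == "__main__":
    eos_id, lang_id, body_ids = 2, 999, [10, 11, 12]
    default_ids = [lang_id] + body_ids + [eos_id]  # prefix=[lang_code], suffix=[eos]
    legacy_ids = body_ids + [eos_id, lang_id]      # prefix=[],          suffix=[eos, lang_code]
    print(default_ids, legacy_ids)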
| 262
| 0
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor( SequenceFeatureExtractor ):
    model_input_names = ["""audio_values""", """audio_mask"""]
    def __init__( self , spectrogram_length=2_0_4_8 , num_channels=1 , patch_size=[1_6, 1_6] , feature_size=1_2_8 , sampling_rate=4_4_1_0_0 , hop_length_to_sampling_rate=8_6 , n_fft=2_0_4_8 , padding_value=0.0 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=sampling_rate , norm="slaney" , mel_scale="slaney" , ).T
    def _np_extract_fbank_features( self , waveform ) -> np.ndarray:
        '''simple docstring'''
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__( self , raw_speech , return_tensors = None , return_attention_mask = True , sampling_rate = None , resample = False , mask_audio = False , **kwargs , ) -> BatchFeature:
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    F''' with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , list ):
            audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors )
        return encoded_inputs
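# A stand-alone sketch of the audio-mask arithmetic above, with illustrative sizes
# only: each sample contributes ceil(frames / patch_time) * freq_len valid patch
# positions, zero-padded up to the longest sample in the batch.
if __name__ == "__main__":
    patch_time, freq_len = 16, 8
    frames_per_sample = [100, 37]
    max_patch_len = max(ceil(f / patch_time) * freq_len for f in frames_per_sample)
    masks = [
        [1] * (ceil(f / patch_time) * freq_len) + [0] * (max_patch_len - ceil(f / patch_time) * freq_len)
        for f in frames_per_sample
    ]
    assert all(len(m) == max_patch_len for m in masks)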
| 148
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class InstructBlipVisionConfig( PretrainedConfig ):
    model_type = """instructblip_vision_model"""
    def __init__( self , hidden_size=1_4_0_8 , intermediate_size=6_1_4_4 , num_hidden_layers=3_9 , num_attention_heads=1_6 , image_size=2_2_4 , patch_size=1_4 , hidden_act="gelu" , layer_norm_eps=1E-6 , attention_dropout=0.0 , initializer_range=1E-10 , qkv_bias=True , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipQFormerConfig( PretrainedConfig ):
    model_type = """instructblip_qformer"""
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1_4_0_8 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipConfig( PretrainedConfig ):
    model_type = """instructblip"""
    is_composition = True
    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=3_2 , **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
        self.vision_config = InstructBlipVisionConfig(**vision_config )
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config )
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs( cls , vision_config , qformer_config , text_config , **kwargs , ):
        '''simple docstring'''
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
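# A hedged usage sketch of the composition logic above, built from default
# sub-configs (the "opt" text config mirrors the fallback used in __init__):
if __name__ == "__main__":
    vision = InstructBlipVisionConfig()
    qformer = InstructBlipQFormerConfig()
    text = CONFIG_MAPPING["opt"]()
    full = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)
    # the Q-Former is wired to cross-attend over vision states of matching width
    assert full.qformer_config.encoder_hidden_size == vision.hidden_size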
| 148
| 1
|
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config( config_path , display=False ):
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan( device , conf_path=None , ckpt_path=None ):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd , strict=True )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan( x , model ):
    z, _, _ = model.encode(x )
    print(f"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str( string , reload=False ):
    module, cls = string.rsplit("." , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def instantiate_from_config( config ):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate." )
    return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def load_model_from_config( config , sd , gpu=True , eval_mode=True ):
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model( config , ckpt , gpu , eval_mode ):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt , map_location="cpu" )
        global_step = pl_sd["global_step"]
        print(f"""loaded model from global step {global_step}.""" )
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=gpu , eval_mode=eval_mode )["model"]
    return model, global_step
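# A dependency-free sketch of the dotted-path instantiation helpers above:
# `get_obj_from_str` resolves "module.Class" and `instantiate_from_config`
# calls it with the optional "params" kwargs.
if __name__ == "__main__":
    assert get_obj_from_str("collections.OrderedDict") is __import__("collections").OrderedDict
    obj = instantiate_from_config({"target": "collections.OrderedDict", "params": {}})
    print(type(obj))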
| 202
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="dataset" ) ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_( state_dict ):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( name ):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model" , "dpt.encoder" )
    if "pretrained.model" in name:
        name = name.replace("pretrained.model" , "dpt.embeddings" )
    if "patch_embed" in name:
        name = name.replace("patch_embed" , "patch_embeddings" )
    if "pos_embed" in name:
        name = name.replace("pos_embed" , "position_embeddings" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "proj" in name and "project" not in name:
        name = name.replace("proj" , "projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "layer" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv" , "head" )
    if "scratch" in name:
        name = name.replace("scratch" , "neck" )
    if "layer1_rn" in name:
        name = name.replace("layer1_rn" , "convs.0" )
    if "layer2_rn" in name:
        name = name.replace("layer2_rn" , "convs.1" )
    if "layer3_rn" in name:
        name = name.replace("layer3_rn" , "convs.2" )
    if "layer4_rn" in name:
        name = name.replace("layer4_rn" , "convs.3" )
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
    if "out_conv" in name:
        name = name.replace("out_conv" , "projection" )
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1" , "residual_layer1" )
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2" , "residual_layer2" )
    if "conv1" in name:
        name = name.replace("conv1" , "convolution1" )
    if "conv2" in name:
        name = name.replace("conv2" , "convolution2" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
    if "pretrained" in name:
        name = name.replace("pretrained" , "dpt" )
    if "bn" in name:
        name = name.replace("bn" , "batch_norm" )
    if "head" in name:
        name = name.replace("head" , "head.head" )
    if "encoder.norm" in name:
        name = name.replace("encoder.norm" , "layernorm" )
    if "auxlayer" in name:
        name = name.replace("auxlayer" , "auxiliary_head.head" )
    return name
def read_in_q_k_v( state_dict , config ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name ):
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if "ade" in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors="pt" )
    # forward pass
    outputs = model(**encoding ).logits if "ade" in checkpoint_url else model(**encoding ).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.31_99, 6.36_29, 6.41_48], [6.38_50, 6.36_15, 6.41_66], [6.35_19, 6.31_76, 6.35_75]] )
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.04_80, 4.24_20, 4.43_60], [4.31_24, 4.56_93, 4.82_61], [4.57_68, 4.89_65, 5.21_63]] )
    assert outputs.shape == torch.Size(expected_shape )
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , expected_slice , atol=1E-4 )
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , expected_slice )
    )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing model to hub..." )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=True , )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
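# Notes on the conversion above (hedged; the script file name is illustrative):
# the "tricky" refinenet remap in `rename_key` sends original stages 4,3,2,1 to
# fusion-stage layers 0,1,2,3, i.e. {i: abs(i - 4) for i in (1, 2, 3, 4)} ==
# {1: 3, 2: 2, 3: 1, 4: 0}. A typical invocation, assuming this file is saved
# as convert_dpt_checkpoint.py:
#     python convert_dpt_checkpoint.py --checkpoint_url <dpt_checkpoint_url> \
#         --pytorch_dump_folder_path ./dpt-large --model_name dpt-large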
| 202
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor( DeformableDetrImageProcessor ):
    def __init__( self , *args , **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use DeformableDetrImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs)
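# Hedged usage note: instantiating the shim emits the deprecation warning once per
# construction; new code should build the replacement class directly, e.g.
#     processor = DeformableDetrImageProcessor()  # same preprocessing API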
| 242
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotImageClassificationPipeline( Pipeline ):
def __init__( self , **snake_case) -> Optional[int]:
'''simple docstring'''
super().__init__(**snake_case)
requires_backends(self , 'vision')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
    def __call__( self , images , **kwargs):
        '''simple docstring'''
        return super().__call__(images , **kwargs)
    def _sanitize_parameters( self , **kwargs):
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}."):
        '''simple docstring'''
        image = load_image(image)
        inputs = self.image_processor(images=[image] , return_tensors=self.framework)
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True)
        inputs['text_inputs'] = [text_inputs]
        return inputs
    def _forward( self , model_inputs):
        '''simple docstring'''
        candidate_labels = model_inputs.pop('candidate_labels')
        text_inputs = model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0] , UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs)
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs):
        '''simple docstring'''
        candidate_labels = model_outputs.pop('candidate_labels')
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores , list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels) , key=lambda x: -x[0])
        ]
        return result
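# A stand-alone sketch of the score/label ranking performed in `postprocess`
# (plain Python, illustrative values only):
if __name__ == "__main__":
    scores, labels = [0.1, 0.7, 0.2], ["cat", "dog", "fish"]
    ranked = [
        {"score": s, "label": l}
        for s, l in sorted(zip(scores, labels), key=lambda x: -x[0])
    ]
    assert ranked[0] == {"score": 0.7, "label": "dog"}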
| 242
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
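# Hedged usage sketch (the script file name and checkpoint paths are illustrative):
#     python convert_bert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./uncased_L-12_H-768_A-12/bert_model.ckpt \
#         --bert_config_file ./uncased_L-12_H-768_A-12/bert_config.json \
#         --pytorch_dump_path ./bert_base_uncased.bin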
| 33
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a__( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase__ = StableUnCLIPImgaImgPipeline
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase__ = frozenset([] )
def lowercase_ ( self : int ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# image encoding components
a : List[Any] = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
a : Dict = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=__snake_case , projection_dim=__snake_case , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
a : Dict = StableUnCLIPImageNormalizer(embedding_dim=__snake_case )
a : Optional[int] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
a : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
a : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
a : Union[str, Any] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__snake_case , layers_per_block=1 , upcast_attention=__snake_case , use_linear_projection=__snake_case , )
torch.manual_seed(0 )
a : List[Any] = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.00085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=__snake_case , steps_offset=1 , )
torch.manual_seed(0 )
a : List[str] = AutoencoderKL()
a : str = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
def lowercase_ ( self : Tuple , __snake_case : List[str] , __snake_case : Union[str, Any]=0 , __snake_case : Tuple=True ):
if str(__snake_case ).startswith('mps' ):
a : Tuple = torch.manual_seed(__snake_case )
else:
a : List[Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
a : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case )
if pil_image:
a : Optional[Any] = input_image * 0.5 + 0.5
a : Optional[Any] = input_image.clamp(0 , 1 )
a : Dict = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
a : int = DiffusionPipeline.numpy_to_pil(__snake_case )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def lowercase_ ( self : Optional[Any] ):
a : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
a : Union[str, Any] = self.get_dummy_components()
a : Any = StableUnCLIPImgaImgPipeline(**__snake_case )
a : Tuple = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
a : Union[str, Any] = self.get_dummy_inputs(__snake_case )
inputs.update({'image_embeds': None} )
a : str = sd_pipe(**__snake_case ).images
a : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a : Optional[int] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self : List[str] ):
a : int = torch_device in ['cpu', 'mps']
self._test_attention_slicing_forward_pass(test_max_difference=__snake_case )
def lowercase_ ( self : int ):
a : Optional[int] = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=__snake_case )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase_ ( self : Dict ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__snake_case )
@slow
@require_torch_gpu
class a__( unittest.TestCase ):
def lowercase_ ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[Any] ):
a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
a : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )
a : Optional[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
a : Optional[int] = pipe(__snake_case , 'anime turle' , generator=__snake_case , output_type='np' )
a : List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
def lowercase_ ( self : Optional[int] ):
a : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
a : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )
a : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
a : str = pipe(__snake_case , 'anime turle' , generator=__snake_case , output_type='np' )
a : List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
def lowercase_ ( self : Any ):
a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
a : Optional[Any] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a : Optional[int] = pipe(
__snake_case , 'anime turtle' , num_inference_steps=2 , output_type='np' , )
a : int = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
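# A hedged, stand-alone restatement of the device-aware seeding pattern used in
# `get_dummy_inputs` above (helper name is illustrative): MPS only supports the
# global CPU seed, while other devices accept a device-bound generator.
def _make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)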
| 297
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester( unittest.TestCase):
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : Tuple=3_2 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : Optional[Any]=1_0 , UpperCAmelCase__ : Optional[int]=[1_0, 2_0, 3_0, 4_0] , UpperCAmelCase__ : Dict=[1, 1, 2, 1] , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : List[str]="relu" , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : List[str]=None , ) -> Tuple:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = embeddings_size
__SCREAMING_SNAKE_CASE = hidden_sizes
__SCREAMING_SNAKE_CASE = depths
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = scope
__SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values
def UpperCAmelCase_ ( self : int ) -> List[str]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE = FlaxRegNetModel(config=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) -> List[str]:
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = FlaxRegNetForImageClassification(config=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class FlaxRegNetModelTest( FlaxModelTesterMixin , unittest.TestCase):
"""simple docstring"""
snake_case__ : int = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
snake_case__ : List[Any] = False
snake_case__ : Union[str, Any] = False
snake_case__ : Optional[Any] = False
def UpperCAmelCase_ ( self : Any ) -> None:
__SCREAMING_SNAKE_CASE = FlaxRegNetModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
return
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : str ) -> List[str]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]:
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
pass
def UpperCAmelCase_ ( self : int ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
def check_hidden_states_output(UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ):
__SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
__SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__SCREAMING_SNAKE_CASE = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase__ ) , expected_num_stages + 1 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> int:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ )
@jax.jit
def model_jitted(UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Tuple ):
return model(pixel_values=UpperCAmelCase__ , **UpperCAmelCase__ )
with self.subTest("JIT Enabled" ):
__SCREAMING_SNAKE_CASE = model_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE = model_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCAmelCase__ ():
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_flax
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
return AutoImageProcessor.from_pretrained("facebook/regnet-y-040" ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040" )
__SCREAMING_SNAKE_CASE = self.default_image_processor
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=UpperCAmelCase__ , return_tensors="np" )
__SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ )
# verify the logits
__SCREAMING_SNAKE_CASE = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = jnp.array([-0.4_180, -1.5_051, -3.4_836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
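# A minimal, stand-alone version of the jit-vs-eager equivalence check performed in
# the tests above (helper name is illustrative; assumes a working JAX install):
def _check_jit_matches_eager():
    import jax
    import jax.numpy as jnp
    fn = jax.jit(lambda x: x * 2)
    x = jnp.ones((2, 2))
    jitted = fn(x)
    with jax.disable_jit():
        eager = fn(x)
    assert (jitted == eager).all()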
| 195
|
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Tuple = logging.get_logger(__name__)
a__ : List[Any] = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class EfficientFormerConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "efficientformer"
    def __init__( self , depths = [3, 2, 6, 4] , hidden_sizes = [4_8, 9_6, 2_2_4, 4_4_8] , downsamples = [True, True, True, True] , dim = 4_4_8 , key_dim = 3_2 , attention_ratio = 4 , resolution = 7 , num_hidden_layers = 5 , num_attention_heads = 8 , mlp_expansion_ratio = 4 , hidden_dropout_prob = 0.0 , patch_size = 1_6 , num_channels = 3 , pool_size = 3 , downsample_patch_size = 3 , downsample_stride = 2 , downsample_pad = 1 , drop_path_rate = 0.0 , num_meta3d_blocks = 1 , distillation = True , use_layer_scale = True , layer_scale_init_value = 1E-5 , hidden_act = "gelu" , initializer_range = 0.02 , layer_norm_eps = 1E-12 , image_size = 2_2_4 , batch_norm_eps = 1E-05 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
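# A hedged usage sketch of the config above: the defaults round-trip through the
# serialization helpers inherited from PretrainedConfig.
if __name__ == "__main__":
    config = EfficientFormerConfig()
    assert config.to_dict()["model_type"] == "efficientformer"
    assert config.num_meta3d_blocks == 1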
| 195
| 1
|
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
snake_case_ = "<s>"
snake_case_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def lowerCAmelCase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<eod>" )
self.assertEqual(len(_UpperCAmelCase ) , 1_0_0_6 )
def lowerCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def lowerCAmelCase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
snake_case_ = XLNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
snake_case_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] )
snake_case_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
snake_case_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] )
snake_case_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def lowerCAmelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
snake_case_ = XLNetTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
snake_case_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] )
def lowerCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
snake_case_ = XLNetTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
snake_case_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
@slow
def lowerCAmelCase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ = XLNetTokenizer.from_pretrained("xlnet-base-cased" )
snake_case_ = tokenizer.encode("sequence builders" , add_special_tokens=_UpperCAmelCase )
snake_case_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_UpperCAmelCase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
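# Added note: in the xlnet-base-cased vocabulary id 4 is <sep> and id 3 is <cls>
# (padding id 5 appears in the integration test below); XLNet appends its special
# tokens at the end, so a pair encodes as A + <sep> + B + <sep> + <cls>.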
@slow
def lowerCAmelCase__ ( self : List[str] ) -> Any:
"""simple docstring"""
# fmt: off
snake_case_ = {"input_ids": [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
| 159
|
'''simple docstring'''
def uppercase_each_letter(txt):
    """
    Return every variant of ``txt`` in which exactly one letter is uppercased.

    >>> uppercase_each_letter("abc")
    ['Abc', 'aBc', 'abC']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 298
| 0
|
def is_ip_va_address_valid(ip_va_address):
    octets = [int(i) for i in ip_va_address.split('.') if i.isdigit()]
    # A valid IPv4 address has exactly four octets, each in the range 0-255.
    return len(octets) == 4 and all(0 <= octet <= 2_5_5 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
    print(F'{ip} is a {valid_or_invalid} IP v4 address.')
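# Illustrative sanity checks (an added sketch, not part of the original snippet;
# they rely on the is_ip_va_address_valid function defined above):
assert is_ip_va_address_valid('192.168.0.1')
assert is_ip_va_address_valid('255.255.255.255')
assert not is_ip_va_address_valid('256.0.0.1')  # octet out of range
assert not is_ip_va_address_valid('1.2.3')  # too few octets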
| 119
|
def exchange_sort(numbers):
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                # Swap so the smaller value moves into position i.
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(exchange_sort(unsorted))
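# Worked example (illustrative addition): exchange sort repeatedly swaps
# out-of-order pairs, costing O(n^2) comparisons in every case.
assert exchange_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]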
| 119
| 1
|
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _lowerCamelCase ( lowercase : List[str] ) -> Optional[Any]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
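# Illustrative boundary checks for the block test above (added comments):
#   cp = 0x4E2D ("中") -> falls in 0x4E00-0x9FFF, so the function returns True
#   cp = 0x0041 ("A")  -> outside every listed block, so it returns False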
def _lowerCamelCase ( lowercase : str ) -> Tuple:
# word like '180' or '身高' or '神'
for char in word:
_a = ord(lowercase )
if not _is_chinese_char(lowercase ):
return 0
return 1
def _lowerCamelCase ( lowercase : List[str] ) -> List[Any]:
_a = set()
for token in tokens:
_a = len(lowercase ) > 1 and is_chinese(lowercase )
if chinese_word:
word_set.add(lowercase )
_a = list(lowercase )
return word_list
def _lowerCamelCase ( lowercase : List[str] , lowercase : set() ) -> Dict:
if not chinese_word_set:
return bert_tokens
_a = max([len(lowercase ) for w in chinese_word_set] )
_a = bert_tokens
_a , _a = 0, len(lowercase )
while start < end:
_a = True
if is_chinese(bert_word[start] ):
_a = min(end - start , lowercase )
for i in range(lowercase , 1 , -1 ):
_a = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_a = "##" + bert_word[j]
_a = start + i
_a = False
break
if single_word:
start += 1
return bert_word
def _lowerCamelCase ( lowercase : List[str] , lowercase : LTP , lowercase : BertTokenizer ) -> int:
_a = []
for i in range(0 , len(lowercase ) , 100 ):
_a = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["cws"] ).cws
_a = [get_chinese_word(lowercase ) for r in res]
ltp_res.extend(lowercase )
assert len(lowercase ) == len(lowercase )
_a = []
for i in range(0 , len(lowercase ) , 100 ):
_a = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowercase , truncation=lowercase , max_length=512 )
bert_res.extend(res["input_ids"] )
assert len(lowercase ) == len(lowercase )
_a = []
for input_ids, chinese_word in zip(lowercase , lowercase ):
_a = []
for id in input_ids:
_a = bert_tokenizer._convert_id_to_token(lowercase )
input_tokens.append(lowercase )
_a = add_sub_symbol(lowercase , lowercase )
_a = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(lowercase ):
if token[:2] == "##":
_a = token[2:]
# save chinese tokens' pos
if len(lowercase ) == 1 and _is_chinese_char(ord(lowercase ) ):
ref_id.append(lowercase )
ref_ids.append(lowercase )
assert len(lowercase ) == len(lowercase )
return ref_ids
def _lowerCamelCase ( lowercase : str ) -> Tuple:
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , "r" , encoding="utf-8" ) as f:
_a = f.readlines()
_a = [line.strip() for line in data if len(lowercase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_a = LTP(args.ltp ) # faster in GPU device
_a = BertTokenizer.from_pretrained(args.bert )
_a = prepare_ref(lowercase , lowercase , lowercase )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
_a = [json.dumps(lowercase ) + "\n" for ref in ref_ids]
f.writelines(lowercase )
if __name__ == "__main__":
lowerCAmelCase_ : Tuple = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
lowerCAmelCase_ : Tuple = parser.parse_args()
main(args)
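# Example invocation (illustrative; the script name is hypothetical and the
# paths simply restate the argparse defaults above):
#   python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt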
| 63
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a , _a , _a , _a , ) -> List[Any]:
super().__init__()
self.register_modules(
prior=_a , image_encoder=_a , image_processor=_a , scheduler=_a , renderer=_a , )
def a__ ( self , _a , _a , _a , _a , _a , _a ) -> str:
if latents is None:
_A : str = randn_tensor(_a , generator=_a , device=_a , dtype=_a )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
_A : Union[str, Any] = latents.to(_a )
_A : int = latents * scheduler.init_noise_sigma
return latents
def a__ ( self , _a=0 ) -> Optional[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_A : str = torch.device(F'''cuda:{gpu_id}''' )
_A : Any = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
def a__ ( self ) -> List[Any]:
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_a , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def a__ ( self , _a , _a , _a , _a , ) -> Tuple:
if isinstance(_a , _a ) and isinstance(image[0] , torch.Tensor ):
_A : int = torch.cat(_a , axis=0 ) if image[0].ndim == 4 else torch.stack(_a , axis=0 )
if not isinstance(_a , torch.Tensor ):
_A : Dict = self.image_processor(_a , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
_A : int = image.to(dtype=self.image_encoder.dtype , device=_a )
_A : List[Any] = self.image_encoder(_a )["""last_hidden_state"""]
_A : List[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_A : Dict = image_embeds.repeat_interleave(_a , dim=0 )
if do_classifier_free_guidance:
_A : str = torch.zeros_like(_a )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_A : List[str] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_a )
def __call__( self , _a , _a = 1 , _a = 25 , _a = None , _a = None , _a = 4.0 , _a = 64 , _a = "pil" , _a = True , ) -> Union[str, Any]:
if isinstance(_a , PIL.Image.Image ):
_A : List[Any] = 1
elif isinstance(_a , torch.Tensor ):
_A : Any = image.shape[0]
elif isinstance(_a , _a ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
_A : Union[str, Any] = len(_a )
else:
raise ValueError(
F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_a )}''' )
_A : Optional[int] = self._execution_device
_A : Tuple = batch_size * num_images_per_prompt
_A : List[Any] = guidance_scale > 1.0
_A : Optional[Any] = self._encode_image(_a , _a , _a , _a )
# prior
self.scheduler.set_timesteps(_a , device=_a )
_A : Optional[int] = self.scheduler.timesteps
_A : List[str] = self.prior.config.num_embeddings
_A : int = self.prior.config.embedding_dim
_A : Optional[Any] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _a , _a , _a , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_A : List[Any] = latents.reshape(latents.shape[0] , _a , _a )
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
_A : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A : int = self.scheduler.scale_model_input(_a , _a )
_A : Tuple = self.prior(
_a , timestep=_a , proj_embedding=_a , ).predicted_image_embedding
# remove the variance
_A , _A : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
_A , _A : Dict = noise_pred.chunk(2 )
_A : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
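# i.e. noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond),
# which pushes the prediction away from the unconditional branch as the scale grows.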
_A : int = self.scheduler.step(
_a , timestep=_a , sample=_a , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_a )
_A : List[str] = []
for i, latent in enumerate(_a ):
_A : List[str] = self.renderer.decode(
latent[None, :] , _a , size=_a , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_a )
_A : List[Any] = torch.stack(_a )
if output_type not in ["np", "pil"]:
raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' )
_A : List[str] = images.cpu().numpy()
if output_type == "pil":
_A : List[Any] = [self.numpy_to_pil(_a ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_a )
| 26
| 0
|
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _A :
def __init__( self : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple=13 , __magic_name__ : Optional[int]=32 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Union[str, Any]=3 , __magic_name__ : Any=16 , __magic_name__ : int=[1, 2, 1] , __magic_name__ : Dict=[2, 2, 4] , __magic_name__ : Optional[int]=2 , __magic_name__ : str=2.0 , __magic_name__ : str=True , __magic_name__ : Dict=0.0 , __magic_name__ : Optional[Any]=0.0 , __magic_name__ : List[str]=0.1 , __magic_name__ : int="gelu" , __magic_name__ : Optional[int]=False , __magic_name__ : Union[str, Any]=True , __magic_name__ : Tuple=0.02 , __magic_name__ : List[str]=1E-5 , __magic_name__ : int=True , __magic_name__ : Dict=None , __magic_name__ : Optional[Any]=True , __magic_name__ : int=10 , __magic_name__ : int=8 , __magic_name__ : str=["stage1", "stage2", "stage3"] , __magic_name__ : List[Any]=[1, 2, 3] , ) -> Dict:
"""simple docstring"""
__snake_case : List[str] = parent
__snake_case : Optional[int] = batch_size
__snake_case : List[str] = image_size
__snake_case : Any = patch_size
__snake_case : str = num_channels
__snake_case : List[str] = embed_dim
__snake_case : Dict = depths
__snake_case : str = num_heads
__snake_case : str = window_size
__snake_case : List[Any] = mlp_ratio
__snake_case : Optional[int] = qkv_bias
__snake_case : Tuple = hidden_dropout_prob
__snake_case : str = attention_probs_dropout_prob
__snake_case : List[str] = drop_path_rate
__snake_case : Optional[Any] = hidden_act
__snake_case : List[str] = use_absolute_embeddings
__snake_case : List[str] = patch_norm
__snake_case : int = layer_norm_eps
__snake_case : int = initializer_range
__snake_case : int = is_training
__snake_case : Optional[Any] = scope
__snake_case : int = use_labels
__snake_case : Tuple = type_sequence_label_size
__snake_case : Optional[int] = encoder_stride
__snake_case : Union[str, Any] = out_features
__snake_case : int = out_indices
def lowercase__ ( self : Any ) -> List[str]:
"""simple docstring"""
__snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Any = None
if self.use_labels:
__snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : int = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowercase__ ( self : Tuple , __magic_name__ : Any , __magic_name__ : Dict , __magic_name__ : Tuple ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Tuple = MaskFormerSwinModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : int = model(__magic_name__ )
__snake_case : Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__snake_case : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowercase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Optional[int] ) -> List[Any]:
"""simple docstring"""
__snake_case : int = MaskFormerSwinBackbone(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Any = model(__magic_name__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(__magic_name__ ):
__snake_case : str = ["""stem"""]
__snake_case : int = MaskFormerSwinBackbone(config=__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : List[str] = config_and_inputs
__snake_case : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__: str = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowercase__: Union[str, Any] = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {}
lowercase__: Any = False
lowercase__: Any = False
lowercase__: Dict = False
lowercase__: int = False
lowercase__: Any = False
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__snake_case : List[Any] = MaskFormerSwinModelTester(self )
__snake_case : Dict = ConfigTester(self , config_class=__magic_name__ , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
pass
def lowercase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowercase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__magic_name__ )
@unittest.skip("""Swin does not use inputs_embeds""" )
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def lowercase__ ( self : Dict ) -> Dict:
"""simple docstring"""
pass
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def lowercase__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Optional[Any] = model_class(__magic_name__ )
__snake_case : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[Any] = [*signature.parameters.keys()]
__snake_case : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def lowercase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def lowercase__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
pass
def lowercase__ ( self : List[str] , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Any ) -> Optional[Any]:
"""simple docstring"""
__snake_case : int = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Any = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : Optional[Any] = outputs.hidden_states
__snake_case : Tuple = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
# Swin has a different seq_length
__snake_case : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__snake_case : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__snake_case : Any = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : str = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowercase__ ( self : str ) -> List[str]:
"""simple docstring"""
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Tuple = 3
__snake_case : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__snake_case : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__snake_case : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__snake_case : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__snake_case : str = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Tuple = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def lowercase__ ( self : List[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def lowercase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def lowercase__ ( self : int ) -> Any:
"""simple docstring"""
pass
def lowercase__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(__magic_name__ : Optional[Any] ):
__snake_case : Any = 0
return t
def check_equivalence(__magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : int={} ):
with torch.no_grad():
__snake_case : Union[str, Any] = model(**__magic_name__ , return_dict=__magic_name__ , **__magic_name__ )
__snake_case : Tuple = model(**__magic_name__ , return_dict=__magic_name__ , **__magic_name__ ).to_tuple()
def recursive_check(__magic_name__ : Dict , __magic_name__ : Tuple ):
if isinstance(__magic_name__ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__magic_name__ , __magic_name__ ):
recursive_check(__magic_name__ , __magic_name__ )
elif isinstance(__magic_name__ , __magic_name__ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(__magic_name__ , __magic_name__ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(__magic_name__ ) , set_nan_tensor_to_zero(__magic_name__ ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
f''' {torch.isnan(__magic_name__ ).any()} and `inf`: {torch.isinf(__magic_name__ )}. Dict has'''
f''' `nan`: {torch.isnan(__magic_name__ ).any()} and `inf`: {torch.isinf(__magic_name__ )}.'''
) , )
recursive_check(__magic_name__ , __magic_name__ )
for model_class in self.all_model_classes:
__snake_case : Union[str, Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Union[str, Any] = self._prepare_for_class(__magic_name__ , __magic_name__ )
__snake_case : Tuple = self._prepare_for_class(__magic_name__ , __magic_name__ )
check_equivalence(__magic_name__ , __magic_name__ , __magic_name__ )
__snake_case : int = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
__snake_case : Tuple = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
check_equivalence(__magic_name__ , __magic_name__ , __magic_name__ )
__snake_case : Tuple = self._prepare_for_class(__magic_name__ , __magic_name__ )
__snake_case : Optional[Any] = self._prepare_for_class(__magic_name__ , __magic_name__ )
check_equivalence(__magic_name__ , __magic_name__ , __magic_name__ , {"""output_hidden_states""": True} )
__snake_case : Union[str, Any] = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
__snake_case : Optional[Any] = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
check_equivalence(__magic_name__ , __magic_name__ , __magic_name__ , {"""output_hidden_states""": True} )
@require_torch
class _A ( unittest.TestCase , __lowercase ):
lowercase__: Optional[int] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowercase__: Optional[Any] = MaskFormerSwinConfig
def lowercase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = MaskFormerSwinModelTester(self )
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
__snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : int = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
__snake_case : Union[str, Any] = backbone_class(__magic_name__ )
backbone.to(__magic_name__ )
backbone.eval()
__snake_case : int = backbone(**__magic_name__ )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , __magic_name__ )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__snake_case : List[Any] = backbone(**__magic_name__ , output_hidden_states=__magic_name__ )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__snake_case , __snake_case , __snake_case : str = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__snake_case : Optional[int] = backbone(**__magic_name__ , output_attentions=__magic_name__ )
self.assertIsNotNone(outputs.attentions )
| 13
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mt5"] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__UpperCamelCase = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
| 13
| 1
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Union[str, Any]:
return datasets.DatasetInfo(
features=datasets.Features({'''content''': datasets.Value('''string''')}) , supervised_keys=__lowercase , )
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Optional[int]:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()})]
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> List[str]:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__lowercase)
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Dict:
return datasets.DatasetInfo(
features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''')})}) , supervised_keys=__lowercase , )
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Dict:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()})
]
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Optional[int]:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__lowercase)
def lowerCamelCase ( ):
'''simple docstring'''
return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
def lowerCamelCase ( ):
'''simple docstring'''
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
@require_beam
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Any = len(get_test_dummy_examples())
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase :Optional[int] = DummyBeamDataset(cache_dir=__lowercase , beam_runner='''DirectRunner''')
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__lowercase , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""")))
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''')}))
__UpperCamelCase :str = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __lowercase)
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __lowercase)
self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1])
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1])
self.assertTrue(
os.path.exists(os.path.join(__lowercase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''')))
del dset
@require_beam
def UpperCamelCase__ ( self) -> Any:
import apache_beam as beam
__UpperCamelCase :int = beam.io.parquetio.WriteToParquet
__UpperCamelCase :Optional[int] = len(get_test_dummy_examples())
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase :Optional[int] = DummyBeamDataset(cache_dir=__lowercase , beam_runner='''DirectRunner''')
with patch('''apache_beam.io.parquetio.WriteToParquet''') as write_parquet_mock:
__UpperCamelCase :List[Any] = partial(__lowercase , num_shards=2)
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
__lowercase , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""")))
self.assertTrue(
os.path.exists(
os.path.join(
__lowercase , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00001-of-00002.arrow""")))
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''')}))
__UpperCamelCase :Dict = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __lowercase)
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __lowercase)
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['''train''']['''content''']) , sorted(['''foo''', '''bar''', '''foobar''']))
self.assertTrue(
os.path.exists(os.path.join(__lowercase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''')))
del dset
@require_beam
def UpperCamelCase__ ( self) -> List[Any]:
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase :Optional[int] = DummyBeamDataset(cache_dir=__lowercase)
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare)
@require_beam
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :Dict = len(get_test_nested_examples())
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__UpperCamelCase :Tuple = NestedBeamDataset(cache_dir=__lowercase , beam_runner='''DirectRunner''')
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__lowercase , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""")))
self.assertDictEqual(
builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''')})}))
__UpperCamelCase :Union[str, Any] = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , __lowercase)
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , __lowercase)
self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1])
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1])
self.assertTrue(
os.path.exists(os.path.join(__lowercase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''')))
del dset
| 43
|
'''simple docstring'''
import numpy as np
def a__ ( a__ , a__ , a__ = 1E-1_2 , a__ = 1_00 , ):
"""simple docstring"""
assert np.shape(a__ )[0] == np.shape(a__ )[1]
# Ensure proper dimensionality.
assert np.shape(a__ )[0] == np.shape(a__ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(a__ ) == np.iscomplexobj(a__ )
__SCREAMING_SNAKE_CASE = np.iscomplexobj(a__ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(a__ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 1E1_2
while not convergence:
# Multiple matrix by the vector.
__SCREAMING_SNAKE_CASE = np.dot(a__ , a__ )
# Normalize the resulting output vector.
__SCREAMING_SNAKE_CASE = w / np.linalg.norm(a__ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
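# lambda = (v* A v) / (v* v); since v is already normalized the denominator is 1,
# so the quotient reduces to the two dot products computed below.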
__SCREAMING_SNAKE_CASE = vector.conj().T if is_complex else vector.T
__SCREAMING_SNAKE_CASE = np.dot(a__ , np.dot(a__ , a__ ) )
# Check convergence.
__SCREAMING_SNAKE_CASE = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = lambda_
if is_complex:
__SCREAMING_SNAKE_CASE = np.real(lambda_ )
return lambda_, vector
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
__SCREAMING_SNAKE_CASE = np.array([41, 4, 20] )
__SCREAMING_SNAKE_CASE = real_input_matrix.astype(np.complexaaa )
__SCREAMING_SNAKE_CASE = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
__SCREAMING_SNAKE_CASE = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
__SCREAMING_SNAKE_CASE = real_input_matrix
__SCREAMING_SNAKE_CASE = real_vector
elif problem_type == "complex":
__SCREAMING_SNAKE_CASE = complex_input_matrix
__SCREAMING_SNAKE_CASE = complex_vector
# Our implementation.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = power_iteration(a__ , a__ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = np.linalg.eigh(a__ )
# Last eigenvalue is the maximum one.
__SCREAMING_SNAKE_CASE = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
__SCREAMING_SNAKE_CASE = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(a__ ) - np.abs(a__ ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 267
| 0
|
import os
from datetime import datetime as dt
from github import Github
UpperCAmelCase__ = [
"good first issue",
"feature request",
"wip",
]
def _a ( ) -> List[Any]:
a = Github(os.environ['''GITHUB_TOKEN'''] )
a = g.get_repo('''huggingface/accelerate''' )
a = repo.get_issues(state='''open''' )
for issue in open_issues:
a = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
a = comments[0] if len(a ) > 0 else None
a = dt.utcnow()
a = (current_time - issue.updated_at).days
a = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
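# Note (added): this script is designed for scheduled runs (e.g. a daily CI cron)
# with GITHUB_TOKEN available in the environment; the exact workflow wiring is
# repository-specific and not shown here.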
| 26
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] ) ->Tuple:
"""simple docstring"""
a = tempfile.mkdtemp()
a = BlipImageProcessor()
a = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
a = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
a = InstructBlipProcessor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : Optional[Any] , **__UpperCAmelCase : Tuple ) ->List[str]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).tokenizer
def __lowerCAmelCase ( self : int , **__UpperCAmelCase : str ) ->List[str]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).image_processor
def __lowerCAmelCase ( self : Optional[Any] , **__UpperCAmelCase : Any ) ->Optional[Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).qformer_tokenizer
def __lowerCAmelCase ( self : str ) ->Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
a = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
a = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
a = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor.qformer_tokenizer , __UpperCAmelCase )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = self.prepare_image_inputs()
a = image_processor(__UpperCAmelCase , return_tensors='''np''' )
a = processor(images=__UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = processor(text=__UpperCAmelCase )
a = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
a = qformer_tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
def __lowerCAmelCase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def __lowerCAmelCase ( self : Dict ) ->List[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__UpperCAmelCase )
a = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] ) ->str:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 26
| 1
|
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! Please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid an AttributeError on None
            while node is not None and node.value != value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            # We go deep on the right branch
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            # We go deep on the left branch
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
    print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
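
# Minimal usage sketch (assumes the classes above): inserting 8, 3, 10, 1, 6
# and walking the default preorder traversal visits the root first, then the
# left subtree, then the right subtree.
# >>> t = BinarySearchTree()
# >>> t.insert(8, 3, 10, 1, 6)
# >>> [node.value for node in t.traversal_tree()]
# [8, 3, 1, 6, 10]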
from math import pi, sqrt


def gamma(num: float) -> float:
    """Calculates the gamma function for integer and half-integer inputs."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
_lowerCAmelCase : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
_lowerCAmelCase : Optional[Any] = image_classifier(__a, candidate_labels=["a", "b", "c"])
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__a), [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
], )
_lowerCAmelCase : Optional[Any] = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
self.assertEqual(
nested_simplify(__a), [
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
], )
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf")
_lowerCAmelCase : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
_lowerCAmelCase : Union[str, Any] = image_classifier(__a, candidate_labels=["a", "b", "c"])
self.assertEqual(
nested_simplify(__a), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], )
_lowerCAmelCase : Dict = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
self.assertEqual(
nested_simplify(__a), [
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
[
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
{"score": 0.333, "label": ANY(__a)},
],
], )
@slow
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
# This is an image of 2 cats with remotes and no planes
_lowerCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
_lowerCAmelCase : int = image_classifier(__a, candidate_labels=["cat", "plane", "remote"])
self.assertEqual(
nested_simplify(__a), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
_lowerCAmelCase : Optional[int] = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
self.assertEqual(
nested_simplify(__a), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
@slow
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf")
# This is an image of 2 cats with remotes and no planes
_lowerCAmelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
_lowerCAmelCase : List[str] = image_classifier(__a, candidate_labels=["cat", "plane", "remote"])
self.assertEqual(
nested_simplify(__a), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
_lowerCAmelCase : Optional[int] = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
self.assertEqual(
nested_simplify(__a), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
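
# Background note (a sketch of how the pipeline scores labels, not part of the
# tests): each candidate label is rendered into a hypothesis template such as
# "This is a photo of {label}.", encoded by CLIP's text tower, and compared to
# the image embedding; the per-label similarities are softmaxed, which is why
# three interchangeable labels from a tiny random model all land near 0.333.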
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False

try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Prints the choice at the given index, marking the current position."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Should not be directly called; moves the cursor a direction of either up or down."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Start the menu and return the selected index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
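
# Minimal usage sketch (illustrative; assumes a real terminal and the
# `BulletMenu` class above -- the prompt and choice strings are made up):
# menu = BulletMenu("Which compute environment are you using?", ["This machine", "AWS (Amazon SageMaker)"])
# chosen_index = menu.run(default_choice=0)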
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
        hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0,
        qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1,
        hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02,
        layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10,
        encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
def lowercase_ ( self : Any ):
a : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a : Tuple = None
if self.use_labels:
a : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a : Dict = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self : Union[str, Any] ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowercase_ ( self : Optional[Any] , __snake_case : Tuple , __snake_case : List[str] , __snake_case : List[str] ):
a : Optional[Any] = FocalNetModel(config=__snake_case )
model.to(__snake_case )
model.eval()
a : List[str] = model(__snake_case )
a : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
a : Any = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowercase_ ( self : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : str ):
a : int = FocalNetBackbone(config=__snake_case )
model.to(__snake_case )
model.eval()
a : Union[str, Any] = model(__snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
a : Any = None
a : Union[str, Any] = FocalNetBackbone(config=__snake_case )
model.to(__snake_case )
model.eval()
a : Optional[Any] = model(__snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowercase_ ( self : Union[str, Any] , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
a : Dict = FocalNetForMaskedImageModeling(config=__snake_case )
model.to(__snake_case )
model.eval()
a : Union[str, Any] = model(__snake_case )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
a : int = 1
a : int = FocalNetForMaskedImageModeling(__snake_case )
model.to(__snake_case )
model.eval()
a : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a : List[str] = model(__snake_case )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase_ ( self : List[str] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Any ):
a : List[str] = self.type_sequence_label_size
a : List[str] = FocalNetForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
a : Union[str, Any] = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a : Union[str, Any] = 1
a : List[Any] = FocalNetForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a : Optional[Any] = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase_ ( self : List[Any] ):
a : List[str] = self.prepare_config_and_inputs()
a , a , a : List[str] = config_and_inputs
a : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
def lowercase_ ( self : Optional[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self : int ):
return
def lowercase_ ( self : List[str] ):
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowercase_ ( self : str ):
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__snake_case )
def lowercase_ ( self : str ):
a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__snake_case )
def lowercase_ ( self : str ):
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def lowercase_ ( self : List[str] ):
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def lowercase_ ( self : List[str] ):
pass
def lowercase_ ( self : List[Any] ):
a , a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
a : Tuple = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def lowercase_ ( self : Tuple ):
a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
a : List[str] = model_class(__snake_case )
a : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Union[str, Any] = [*signature.parameters.keys()]
a : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case )
def lowercase_ ( self : Tuple , __snake_case : str , __snake_case : int , __snake_case : List[Any] , __snake_case : int ):
a : str = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
a : List[str] = model(**self._prepare_for_class(__snake_case , __snake_case ) )
a : Union[str, Any] = outputs.hidden_states
a : Union[str, Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__snake_case ) , __snake_case )
# FocalNet has a different seq_length
a : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
a : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
a : Optional[int] = outputs.reshaped_hidden_states
self.assertEqual(len(__snake_case ) , __snake_case )
a , a , a , a : List[str] = reshaped_hidden_states[0].shape
a : int = (
reshaped_hidden_states[0].view(__snake_case , __snake_case , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowercase_ ( self : Optional[Any] ):
a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
a : List[Any] = True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a : Dict = True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case )
def lowercase_ ( self : int ):
a , a : Any = self.model_tester.prepare_config_and_inputs_for_common()
a : List[str] = 3
a : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
a : List[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
a : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
a : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
a : Optional[Any] = True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a : Dict = True
self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , (padded_height, padded_width) )
@slow
def lowercase_ ( self : Dict ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Dict = FocalNetModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def lowercase_ ( self : str ):
a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a : Optional[int] = _config_zero_init(__snake_case )
for model_class in self.all_model_classes:
a : Optional[Any] = model_class(config=__snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
@cached_property
def lowercase_ ( self : Tuple ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def lowercase_ ( self : Tuple ):
a : Optional[Any] = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(__snake_case )
a : List[str] = self.default_image_processor
a : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
a : List[str] = image_processor(images=__snake_case , return_tensors='pt' ).to(__snake_case )
# forward pass
with torch.no_grad():
a : List[str] = model(**__snake_case )
# verify the logits
a : List[str] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __snake_case )
a : Union[str, Any] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
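
# Shape sanity check (illustrative, using the tester defaults above):
# image_size=32 and patch_size=2 give (32 // 2) ** 2 = 256 patches after the
# embedding layer, and with depths=[1, 2, 1] the final stage sees
# 256 // 4 ** 2 = 16 tokens of width embed_dim * 2 ** 2 = 64 -- the
# `expected_seq_len` and `expected_dim` computed in `create_and_check_model`.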
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, repeating each prompt n_copies times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` that checks whether all generated functions are complete."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain one of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task in the dataset."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
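
# Example launch (hypothetical script name and flag values; see
# HumanEvalArguments for the real argument definitions and defaults):
#   accelerate launch human_eval.py --model_ckpt <model checkpoint> \
#       --do_sample True --temperature 0.2 --n_samples 200 --batch_size 10 \
#       --HF_ALLOW_CODE_EVAL 1 --output_file eval_results.json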
"""simple docstring"""
def sylvester(number: int) -> int:
    """Calculates the n-th number in Sylvester's sequence."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
"""simple docstring"""
import argparse

JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version: str) -> None:
    """Updates the custom.js file to pin the stable version and register the new one."""
    with open(JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
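
# Example invocation (hypothetical script path):
#   python utils/update_custom_js.py --version 4.30.0
# This pins `const stableVersion = "v4.30.0"` and appends a "v4.30.0" entry
# to the version mapping in docs/source/_static/js/custom.js.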