| code (string, lengths 86 to 54.5k) | code_codestyle (int64, 0 to 371) | style_context (string, lengths 87 to 49.2k) | style_context_codestyle (int64, 0 to 349) | label (int64, 0 to 1) |
|---|---|---|---|---|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    Returns the list containing all the possible combinations a string (target)
    can be constructed from, given a list of substrings (word_bank).
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
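
    # Added sanity check (hedged; not in the original file): every combination
    # returned by all_construct must concatenate back to the target string.
    for combo in all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]):
        assert "".join(combo) == "jwajalapa"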
| 19 |
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """
    Change the contrast of a PIL image by the given level.
    """
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
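
# Added note (hedged): this is the standard GIMP-style contrast curve. Mid-gray is
# a fixed point (contrast(128) == 128 for any level); for level=170 the factor is
# (259 * 425) / (255 * 89), roughly 4.85, so values away from 128 are spread apart.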
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
| 232 | 0 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks).
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
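
# Added note (hedged): dividing the kept activations by keep_prob preserves the
# expected value of the layer output, so no extra rescaling is needed at eval time.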
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """
    Construct Patch Embeddings.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """
    Group Normalization with 1 group. Input: tensor of shape [batch_size, channels, height, width].
    """

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
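
# Added note (hedged): returning pool(x) - x is PoolFormer's parameter-free token
# mixer; the identity part is restored by the residual connection in
# PoolFormerLayer, so only the pooled difference flows through drop-path/layer-scale.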
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs

        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
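
# Added usage sketch (hedged; assumes the `transformers` package and a PIL image
# bound to `image`):
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   logits = model(**processor(images=image, return_tensors="pt")).logits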
| 191 |
def bin_to_octal(bin_string: str) -> str:
    """
    Convert a binary value to its octal equivalent.

    >>> bin_to_octal("1111")
    '17'
    >>> bin_to_octal("101010101010011")
    '52523'
    """
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
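
# Added note (hedged): since 8 == 2**3, each zero-padded 3-bit group maps to exactly
# one octal digit; a builtin equivalent is oct(int(bin_string, 2))[2:].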
if __name__ == "__main__":
from doctest import testmod
testmod()
| 191 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
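
# Added note (hedged): this is what `accelerate test [--config_file path/to/config.yaml]`
# runs from the command line; it launches a small end-to-end script to verify the
# distributed setup before real training.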
| 15 |
# Number of characters in the alphabet, used as the hash base
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    """
    Returns True if the pattern occurs in the text, comparing rolling hashes
    over a sliding window and testing string equality only when hashes match.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
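
# Added note (hedged): the last update above is the standard rolling-hash step.
# With h = alphabet_size ** (p_len - 1) % modulus (tracked as modulus_power):
#   hash(text[i + 1 : i + 1 + p_len])
#       == ((hash(text[i : i + p_len]) - ord(text[i]) * h) * alphabet_size
#           + ord(text[i + p_len])) % modulus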
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 15 | 1 |
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
| 209 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
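
# Added note (hedged): replacing sys.modules[__name__] with a _LazyModule defers
# the submodule imports until first attribute access, while the TYPE_CHECKING
# branch above keeps the real symbols visible to static type checkers.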
| 209 | 1 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
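
    # Added note (hedged): after windowing, x_train has shape
    # (num_samples, look_back, 1) and y_train has shape (num_samples, forward_days),
    # i.e. each sample predicts the next forward_days values from the previous
    # look_back observations.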
| 87 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
_DESCRIPTION = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
_KWARGS_DESCRIPTION = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
CHECKPOINT_URLS = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='''https://github.com/google-research/bleurt''',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''', id='''sequence'''),
                    '''references''': datasets.Value('''string''', id='''sequence'''),
                }
            ),
            codebase_urls=['''https://github.com/google-research/bleurt'''],
            reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''],
        )
    def _download_and_prepare(self, dl_manager):
        # check that the config name specifies a valid BLEURT checkpoint
        if self.config_name == "default":
            logger.warning(
                '''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
                '''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').'''
            )
            checkpoint_name = '''bleurt-base-128'''
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}'''
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))
    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 279 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_encodec""": [
"""ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EncodecConfig""",
],
"""feature_extraction_encodec""": ["""EncodecFeatureExtractor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"""ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EncodecModel""",
"""EncodecPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 358 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""")

        if qformer_config is None:
            qformer_config = {}
            logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""")

        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
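
# Added usage sketch (hedged): InstructBlipConfig composes three sub-configs, e.g.
#   config = InstructBlipConfig(text_config={"model_type": "t5"})
# routes the text config through CONFIG_MAPPING["t5"] instead of the OPT default.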
| 336 | 0 |
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted labels.
    references (`list` of `int`): Ground truth labels.
    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.

        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance. This option can result in an F-score that is not between precision and recall.
        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights. Defaults to None.

Returns:
    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.

Examples:

    Example 1-A simple binary example
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
        >>> print(results)
        {'f1': 0.5}

    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
        >>> print(round(results['f1'], 2))
        0.67

    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
        >>> print(round(results['f1'], 2))
        0.35

    Example 4-A multiclass example, with different values for the `average` input.
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
        >>> print(round(results['f1'], 2))
        0.33
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'f1': array([0.8, 0. , 0. ])}
"""

_CITATION = """
@article{scikit-learn,
    title={Scikit-learn: Machine Learning in {P}ython},
    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
           and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
           and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
           Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
    journal={Journal of Machine Learning Research},
    volume={12},
    pages={2825--2830},
    year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 200 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load a Flax checkpoint from disk into a PyTorch model."""
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
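
# Added note (hedged): the transposes above reconcile parameter layouts. Flax
# stores conv kernels as (H, W, in_channels, out_channels) while PyTorch expects
# (out_channels, in_channels, H, W), hence transpose(3, 2, 0, 1); dense kernels
# only need a plain transpose, and a Flax "scale" maps to a PyTorch norm "weight".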
| 200 | 1 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
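# Usage sketch (editorial addition): assuming a csv produced by the transformers
# benchmark utilities, with columns model,batch_size,sequence_length,result, the
# script would be invoked along the lines of:
#
#   python plot_csv_file.py --csv_file inference_memory.csv --figure_png_file memory.png
#   python plot_csv_file.py --csv_file inference_time.csv --is_time --no_log_scale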
def hamming_distance(string_a: str, string_b: str) -> int:
    """
    Count the positions at which two equal-length strings differ.

    >>> hamming_distance("python", "pythin")
    1
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 5_12,
"facebook/dpr-ctx_encoder-multiset-base": 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 5_12,
"facebook/dpr-question_encoder-multiset-base": 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 5_12,
"facebook/dpr-reader-multiset-base": 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

        [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
              to the maximum acceptable input length for the model if that argument is not provided. This will
              truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences
              (or a batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
              maximum acceptable input length for the model if that argument is not provided. This will only truncate
              the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
            required by one of the truncation/padding parameters. If the model has no specific maximum input length
            (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
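# Usage sketch (editorial addition): the reader tokenizer batches
# question/title/text triples into `(n_passages, sequence_length)` input ids:
#
#   from transformers import DPRReaderTokenizerFast
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )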
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
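# Usage sketch (editorial addition): like any PretrainedConfig subclass, the
# config is built with defaults or keyword overrides, e.g.:
#
#   config = RoCBertConfig(hidden_size=256, num_hidden_layers=4)
#   assert config.model_type == "roc_bert"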
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowercase_ = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , __SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config, cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name,
            max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name,
            max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset,
        compute_metrics=compute_metrics, data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
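# Usage sketch (editorial addition): assuming the SWAG processor from
# utils_multiple_choice.py, a typical invocation of this example script is:
#
#   python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
#       --data_dir ./swag --output_dir ./swag_out --max_seq_length 80 --do_train --do_eval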
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of "weight_norm" or "time_group_norm", got {self.norm_type}'
            )

        super().__init__(**kwargs)

    # Convenience properties derived from the constructor arguments.
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
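# Worked example (editorial addition): the derived properties follow directly
# from the constructor arguments, e.g. for 48 kHz audio cut into 1 s chunks:
#
#   config = EncodecConfig(sampling_rate=48_000, chunk_length_s=1.0, overlap=0.01)
#   config.chunk_length  # int(1.0 * 48_000) == 48_000 samples
#   config.frame_rate    # ceil(48_000 / prod([8, 5, 4, 2])) == 150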
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
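# Usage note (editorial addition): with the lazy module installed in sys.modules,
# imports look ordinary but the heavy submodules only load on first access, e.g.:
#
#   from transformers.models.owlvit import OwlViTProcessor  # loads processing_owlvit only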
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37 = []
    restype_atom37_to_atom14 = []
    restype_atom14_mask = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14)
    restype_atom37_to_atom14.append([0] * 37)
    restype_atom14_mask.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
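# Usage sketch (editorial addition): given a feature dict whose "aatype" tensor
# holds per-residue amino-acid indices, the mapping tensors are added in place:
#
#   protein = {"aatype": torch.zeros(8, dtype=torch.long)}
#   protein = make_atom14_masks(protein)
#   protein["residx_atom14_to_atom37"].shape  # torch.Size([8, 14])
#   protein["residx_atom37_to_atom14"].shape  # torch.Size([8, 37])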
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
"""

_DESCRIPTION = """\
BLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer
learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phase
using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).

See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""

_KWARGS_DESCRIPTION = """
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    'scores': List of scores.
Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
"""
CHECKPOINT_URLS = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
'''simple docstring'''
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
lowerCAmelCase__ : Dict = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
lowerCAmelCase__ : Optional[int] = torch.device("cuda", local_rank)
lowerCAmelCase__ : List[str] = socket.gethostname()
lowerCAmelCase__ : Optional[Any] = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(f"{gpu} is broken")
raise
"""simple docstring"""
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """
    Sort a list of integers in place with pigeonhole sort.

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
A: List[str] = input("Enter numbers separated by comma:\n")
A: Tuple = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
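# Note (editorial addition): pigeonhole sort runs in O(n + k) time and O(k) extra
# space with k = max(array) - min(array) + 1, so it only pays off when the value
# range is small relative to the input length, e.g.:
#
#   pigeon_sort([8, 3, 2, 7, 4, 6, 8])  # -> [2, 3, 4, 6, 7, 8]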
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0

    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 1e-14) -> float:
    """
    Approximate the square root of `a` with Newton's method.
    """
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
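# Worked example (editorial addition): Newton's update for f(x) = x**2 - a is
# x_{n+1} = x_n - (x_n**2 - a) / (2 * x_n); starting from get_initial_point(2) = 4.0
# the iterates 4.0 -> 2.25 -> 1.5694... converge quickly to sqrt(2):
#
#   square_root_iterative(2)    # ~1.4142135623730951
#   square_root_iterative(169)  # ~13.0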
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the test data."}
    )
    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_tabfact_function(_lowercase ):
# Tokenize the texts
def _convert_table_text_to_pandas(_lowercase ):
UpperCAmelCase : Optional[int] = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
UpperCAmelCase : Optional[int] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
UpperCAmelCase : Optional[Any] = examples["""statement"""]
UpperCAmelCase : Any = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
UpperCAmelCase : int = tokenizer(_lowercase , _lowercase , padding=_lowercase , max_length=_lowercase , truncation=_lowercase )
UpperCAmelCase : Optional[Any] = examples["""label"""]
return result
    with training_args.main_process_first(desc="""dataset map pre-processing""" ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("""--do_train requires a train dataset""" )
        train_dataset = raw_datasets["""train"""]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples ) )

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("""--do_eval requires a validation dataset""" )
        eval_dataset = raw_datasets["""validation"""]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples ) )

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("""--do_predict requires a test dataset""" )
        predict_dataset = raw_datasets["""test"""]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.float32 ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["""train_samples"""] = min(max_train_samples , len(train_dataset ) )

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("""train""" , metrics )
        trainer.save_metrics("""train""" , metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )

        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["""eval_samples"""] = min(max_eval_samples , len(eval_dataset ) )

        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
        # Removing the `label` column because it contains -1 and the Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("""label""" )
        predictions = trainer.predict(predict_dataset , metric_key_prefix="""predict""" ).predictions
        predictions = np.argmax(predictions , axis=1 )

        output_predict_file = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
        if trainer.is_world_process_zero():
            with open(output_predict_file , """w""" ) as writer:
                logger.info("""***** Predict Results *****""" )
                writer.write("""index\tprediction\n""" )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(F'''{index}\t{item}\n''' )
    kwargs = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
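# Hedged usage sketch (added for illustration; the checkpoint name, file names, and output
# directory below are assumptions, not taken from this file). The entry point above is
# typically driven from the command line, e.g.:
#
#   python run_tabfact.py \
#       --model_name_or_path microsoft/tapex-base \
#       --train_file train.csv --validation_file valid.csv --test_file test.csv \
#       --do_train --do_eval --do_predict \
#       --output_dir ./tapex-tabfact-out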
| 338 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig ):

    model_type = 'detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation , backbone , use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size( self ) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config( cls , backbone_config , **kwargs ):
        return cls(backbone_config=backbone_config , **kwargs )

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig ):

    torch_onnx_minimum_version = version.parse('1.11' )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        return 1e-5

    @property
    def default_onnx_opset( self ) -> int:
        return 12
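# Minimal usage sketch (added for illustration; assumes the classes above live in a
# transformers-style package and that `DetrConfig`/`DetrOnnxConfig` are importable as shown):
if __name__ == "__main__":
    config = DetrConfig(num_queries=50)
    assert config.hidden_size == config.d_model  # alias resolved via attribute_map
    onnx_config = DetrOnnxConfig(config)
    print(list(onnx_config.inputs.keys()))  # ['pixel_values', 'pixel_mask']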
| 338 | 1 |
'''simple docstring'''
def fizz_buzz(number , iterations ):
    if not isinstance(iterations , int ):
        raise ValueError('iterations must be defined as integers' )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            'starting number must be an integer and be more than 0' )
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
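    # Example run (added for illustration): the loop above appends a trailing space after each
    # token, so fizz_buzz(1, 15) yields
    # "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "
    print(fizz_buzz(1, 15))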
| 83 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class VideoClassificationPipeline(Pipeline ):
    """
    Video classification pipeline using any model that can be loaded for video classification.
    This pipeline predicts the class of a video.
    """

    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , 'decord' )
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING )

    def _sanitize_parameters( self , top_k=None , num_frames=None , frame_sampling_rate=None ):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params['frame_sampling_rate'] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params['num_frames'] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__( self , videos: Union[str, List[str]] , **kwargs ):
        return super().__call__(videos , **kwargs )

    def preprocess( self , video , num_frames=None , frame_sampling_rate=1 ):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith('http://' ) or video.startswith('https://' ):
            video = BytesIO(requests.get(video ).content )

        videoreader = VideoReader(video )
        videoreader.seek(0 )

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx , end_idx , num=num_frames , dtype=np.int64 )

        video = videoreader.get_batch(indices ).asnumpy()
        video = list(video )

        model_inputs = self.image_processor(video , return_tensors=self.framework )
        return model_inputs

    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""" )

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
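# Hedged usage sketch (added for illustration; the checkpoint id and video URL below are
# assumptions, not taken from this file):
#
#   from transformers import pipeline
#   video_classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
#   print(video_classifier("https://example.com/clip.mp4", top_k=3))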
| 28 | 0 |
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest ):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (('eta', 0.0), ('num_inference_steps', 50))
    def get_scheduler_config( self, **kwargs ):
        config = {
            '''num_train_timesteps''': 1_000,
            '''beta_start''': 0.0_0_0_1,
            '''beta_end''': 0.0_2,
            '''beta_schedule''': '''linear''',
            '''clip_sample''': True,
        }

        config.update(**kwargs )
        return config
    def full_loop( self, **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )

        num_inference_steps , eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps )

        for t in scheduler.timesteps:
            residual = model(sample, t )
            sample = scheduler.step(residual, t, sample, eta ).prev_sample

        return sample
def lowercase__ ( self ):
"""simple docstring"""
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase )
lowerCamelCase_ =self.scheduler_classes[0]
lowerCamelCase_ =self.get_scheduler_config(steps_offset=1 )
lowerCamelCase_ =scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1] ) )
def lowercase__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1], [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase, beta_end=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase, prediction_type=lowerCAmelCase, sample_max_value=lowerCAmelCase, )
def lowercase__ ( self ):
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500] ):
self.check_over_forward(time_step=lowerCAmelCase, num_inference_steps=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=lowerCAmelCase, eta=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.scheduler_classes[0]
lowerCamelCase_ =self.get_scheduler_config()
lowerCamelCase_ =scheduler_class(**lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0, 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420, 400 ) - 0.1_4_7_7_1 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980, 960 ) - 0.3_2_4_6_0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0, 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487, 486 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999, 998 ) - 0.0_2 ) ) < 1e-5
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.scheduler_classes[0]
lowerCamelCase_ =self.get_scheduler_config()
lowerCamelCase_ =scheduler_class(**lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =10, 0.0
scheduler.set_timesteps(lowerCAmelCase )
lowerCamelCase_ =self.dummy_model()
lowerCamelCase_ =self.dummy_sample_deter
lowerCamelCase_ =self.dummy_sample_deter + 0.1
lowerCamelCase_ =self.dummy_sample_deter - 0.1
lowerCamelCase_ =samplea.shape[0]
lowerCamelCase_ =torch.stack([samplea, samplea, samplea], dim=0 )
lowerCamelCase_ =torch.arange(lowerCAmelCase )[0:3, None].repeat(1, lowerCAmelCase )
lowerCamelCase_ =model(samples.flatten(0, 1 ), timesteps.flatten(0, 1 ) )
lowerCamelCase_ =scheduler.batch_step_no_noise(lowerCAmelCase, timesteps.flatten(0, 1 ), samples.flatten(0, 1 ), lowerCAmelCase )
lowerCamelCase_ =torch.sum(torch.abs(lowerCAmelCase ) )
lowerCamelCase_ =torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 1_1_4_7.7_9_0_4 ) < 1e-2
assert abs(result_mean.item() - 0.4_9_8_2 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.full_loop()
lowerCamelCase_ =torch.sum(torch.abs(lowerCAmelCase ) )
lowerCamelCase_ =torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 1_7_2.0_0_6_7 ) < 1e-2
assert abs(result_mean.item() - 0.2_2_3_9_6_7 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.full_loop(prediction_type='''v_prediction''' )
lowerCamelCase_ =torch.sum(torch.abs(lowerCAmelCase ) )
lowerCamelCase_ =torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 5_2.5_3_0_2 ) < 1e-2
assert abs(result_mean.item() - 0.0_6_8_4 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.full_loop(set_alpha_to_one=lowerCAmelCase, beta_start=0.0_1 )
lowerCamelCase_ =torch.sum(torch.abs(lowerCAmelCase ) )
lowerCamelCase_ =torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 1_4_9.8_2_9_5 ) < 1e-2
assert abs(result_mean.item() - 0.1_9_5_1 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.full_loop(set_alpha_to_one=lowerCAmelCase, beta_start=0.0_1 )
lowerCamelCase_ =torch.sum(torch.abs(lowerCAmelCase ) )
lowerCamelCase_ =torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 1_4_9.0_7_8_4 ) < 1e-2
assert abs(result_mean.item() - 0.1_9_4_1 ) < 1e-3
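    # Hedged sketch of the batched API the tests above exercise (added for illustration; shapes
    # and names are illustrative): `batch_step_no_noise` denoises several (sample, timestep)
    # pairs in one flattened call, e.g.
    #
    #   scheduler = DDIMParallelScheduler(num_train_timesteps=1_000)
    #   scheduler.set_timesteps(10)
    #   residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
    #   prev = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)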
| 6 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
    """processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_trocr"""] = [
        """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TrOCRForCausalLM""",
        """TrOCRPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
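# Hedged usage sketch (added for illustration; the checkpoint id is the public TrOCR model and
# is an assumption, not taken from this file):
#
#   from transformers import TrOCRProcessor, VisionEncoderDecoderModel
#   processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
#   model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")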
| 6 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig ):
    model_type = '''nllb-moe'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self ,vocab_size=12_81_12 ,max_position_embeddings=10_24 ,encoder_layers=12 ,encoder_ffn_dim=40_96 ,encoder_attention_heads=16 ,decoder_layers=12 ,decoder_ffn_dim=40_96 ,decoder_attention_heads=16 ,encoder_layerdrop=0.0_5 ,decoder_layerdrop=0.0_5 ,use_cache=True ,is_encoder_decoder=True ,activation_function="relu" ,d_model=10_24 ,dropout=0.1 ,attention_dropout=0.1 ,activation_dropout=0.0 ,init_std=0.0_2 ,decoder_start_token_id=2 ,scale_embedding=True ,router_bias=False ,router_dtype="float32" ,router_ignore_padding_tokens=False ,num_experts=1_28 ,expert_capacity=64 ,encoder_sparse_step=4 ,decoder_sparse_step=4 ,router_z_loss_coef=0.0_0_1 ,router_aux_loss_coef=0.0_0_1 ,second_expert_policy="all" ,normalize_router_prob_before_dropping=False ,batch_prioritized_routing=False ,moe_eval_capacity_token_fraction=1.0 ,moe_token_dropout=0.2 ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,output_router_logits=False ,**kwargs ,):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,is_encoder_decoder=is_encoder_decoder ,decoder_start_token_id=decoder_start_token_id ,**kwargs ,) | 191 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 191 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class CodeGenConfig(PretrainedConfig ):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self , vocab_size=5_04_00 , n_positions=20_48 , n_ctx=20_48 , n_embd=40_96 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_02_56 , eos_token_id=5_02_56 , tie_word_embeddings=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class CodeGenOnnxConfig(OnnxConfigWithPast ):
    def __init__( self , config: PretrainedConfig , task: str = "default" , patching_specs: List[PatchingSpec] = None , use_past: bool = False , ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , """pad_token_id""" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}

        return common_inputs

    @property
    def num_layers( self ) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads( self ) -> int:
        return self._config.n_head

    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch

                batch , seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]

        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )

        return ordered_inputs

    @property
    def default_onnx_opset( self ) -> int:
        return 13
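# Minimal usage sketch (added for illustration; assumes the classes above are importable in a
# transformers-style package layout and that the small hyperparameters below are arbitrary):
if __name__ == "__main__":
    config = CodeGenConfig(n_layer=2, n_head=4, n_embd=128, rotary_dim=32)
    onnx_config = CodeGenOnnxConfig(config, use_past=True)
    # input_ids first, then past_key_values.<i>.key/value pairs, then attention_mask
    print(list(onnx_config.inputs.keys()))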
| 354 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , images: ImageInput = None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("""You have to specify either images or text.""" )

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )

        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )

        return encoding_image_processor

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
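# Hedged usage sketch (added for illustration; the checkpoint id is the public BLIP captioning
# model and is an assumption, not taken from this file):
#
#   from transformers import BlipProcessor, BlipForConditionalGeneration
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photography of", return_tensors="pt")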
| 212 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput ):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module ):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup( self ):
        self.conv_in = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

        blocks = []
        for i in range(len(self.block_out_channels ) - 1 ):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv1 )
            conv2 = nn.Conv(
                channel_out , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv2 )
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__( self , conditioning ):
        embedding = self.conv_in(conditioning )
        embedding = nn.silu(embedding )

        for block in self.blocks:
            embedding = block(embedding )
            embedding = nn.silu(embedding )

        embedding = self.conv_out(embedding )

        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module , FlaxModelMixin , ConfigMixin ):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights( self , rng: jax.random.KeyArray ) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape , dtype=jnp.float32 )

        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}

        return self.init(rngs , sample , timesteps , encoder_hidden_states , controlnet_cond )["params"]
    def setup( self ):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )

        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(controlnet_block )

        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )

            down_blocks.append(down_block )

            for _ in range(self.layers_per_block ):
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , controlnet_cond , conditioning_scale: float = 1.0 , return_dict: bool = True , train: bool = False , ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond , axis=1 )

        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )

        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )

        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )

        controlnet_cond = jnp.transpose(controlnet_cond , (0, 2, 3, 1) )
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond )
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlock2D ):
                sample , res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample , res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples , self.controlnet_down_blocks ):
            down_block_res_sample = controlnet_block(down_block_res_sample )
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample )

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples , mid_block_res_sample=mid_block_res_sample )
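# Hedged initialization sketch (added for illustration; mirrors the `init_weights` shapes above):
#
#   import jax
#   controlnet = FlaxControlNetModel(sample_size=32)
#   params = controlnet.init_weights(rng=jax.random.PRNGKey(0))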
| 209 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin ):
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """BlipImageProcessor"""
    tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")

    def __init__( self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''' )

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )

        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )

        return encoding_image_processor

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
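# Hedged usage sketch (added for illustration; `image` and `generated_ids` are placeholders,
# not defined in this file): the processor both encodes inputs and decodes generated ids.
#
#   encoding = processor(images=image, text="a photography of", return_tensors="pt")
#   captions = processor.batch_decode(generated_ids, skip_special_tokens=True)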
| 209 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    '''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_swinv2'''] = [
        '''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Swinv2ForImageClassification''',
        '''Swinv2ForMaskedImageModeling''',
        '''Swinv2Model''',
        '''Swinv2PreTrainedModel''',
    ]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
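# Hedged usage sketch (added for illustration; the checkpoint id is an assumption, not taken
# from this file): the lazy module resolves submodules on first attribute access, e.g.
#
#   from transformers import Swinv2Model
#   model = Swinv2Model.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")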
| 357 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyInpaintPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
    required_optional_params = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self ):
"""simple docstring"""
return 32
@property
    def time_input_dim( self ):
"""simple docstring"""
return 32
@property
    def block_out_channels_a( self ):
"""simple docstring"""
return self.time_input_dim
@property
    def time_embed_dim( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
    def cross_attention_dim( self ):
"""simple docstring"""
return 100
@property
    def dummy_tokenizer( self ):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
        return tokenizer
@property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()

        return text_encoder
@property
    def dummy_unet( self ):
        torch.manual_seed(0 )

        model_kwargs = {
            """in_channels""": 9,
            # Out channels is double in channels because it predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """text_image""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """text_image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }

        model = UNetaDConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="""epsilon""" , thresholding=False , )

        components = {
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
"""simple docstring"""
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__lowerCAmelCase )
# create init_image
lowercase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
lowercase = np.ones((64, 64) , dtype=np.floataa )
lowercase = 0
if str(__lowerCAmelCase ).startswith("""mps""" ):
lowercase = torch.manual_seed(__lowerCAmelCase )
else:
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
lowercase = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
    def test_kandinsky_inpaint( self ):
"""simple docstring"""
lowercase = """cpu"""
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**__lowerCAmelCase )
lowercase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) )
lowercase = output.images
lowercase = pipe(
**self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
lowercase = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def A__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
"""simple docstring"""
lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowercase = np.ones((768, 768) , dtype=np.floataa )
lowercase = 0
lowercase = """a hat"""
lowercase = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCAmelCase )
lowercase = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
lowercase = pipeline.to(__lowerCAmelCase )
pipeline.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase , lowercase = pipe_prior(
__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
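# The prior maps the text prompt to CLIP image embeddings; to_tuple() yields
# (image_embeds, negative_image_embeds), both consumed by the inpaint pipeline below.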
lowercase = pipeline(
__lowerCAmelCase , image=__lowerCAmelCase , mask_image=__lowerCAmelCase , image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
| 32 | 0 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
def UpperCamelCase_ ( self : Tuple) -> Optional[int]:
"""simple docstring"""
_snake_case , _snake_case : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
_snake_case : int = """A painting of a squirrel eating a burger"""
_snake_case : List[str] = jax.device_count()
_snake_case : Any = num_samples * [prompt]
_snake_case : Optional[int] = sd_pipe.prepare_inputs(lowerCAmelCase)
_snake_case : List[Any] = replicate(lowerCAmelCase)
_snake_case : Any = shard(lowerCAmelCase)
_snake_case : Dict = jax.random.PRNGKey(0)
_snake_case : List[Any] = jax.random.split(lowerCAmelCase , jax.device_count())
_snake_case : Optional[int] = sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase)[0]
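# shard() adds a leading device axis so the jitted (pmapped) pipeline runs one
# sub-batch per device; the reshape below folds that axis back into the batch dimension.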
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
_snake_case : str = images[0, 253:256, 253:256, -1]
_snake_case : Tuple = jnp.asarray(jax.device_get(image_slice.flatten()))
_snake_case : Any = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512])
print(F'''output_slice: {output_slice}''')
assert jnp.abs(output_slice - expected_slice).max() < 1E-2
def UpperCamelCase_ ( self : List[str]) -> List[str]:
"""simple docstring"""
_snake_case : List[str] = """stabilityai/stable-diffusion-2"""
_snake_case , _snake_case : Optional[int] = FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase , subfolder="""scheduler""")
_snake_case , _snake_case : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
lowerCAmelCase , scheduler=lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
_snake_case : List[Any] = scheduler_params
_snake_case : Any = """A painting of a squirrel eating a burger"""
_snake_case : List[Any] = jax.device_count()
_snake_case : Optional[Any] = num_samples * [prompt]
_snake_case : Any = sd_pipe.prepare_inputs(lowerCAmelCase)
_snake_case : Tuple = replicate(lowerCAmelCase)
_snake_case : List[Any] = shard(lowerCAmelCase)
_snake_case : Optional[int] = jax.random.PRNGKey(0)
_snake_case : List[Any] = jax.random.split(lowerCAmelCase , jax.device_count())
_snake_case : int = sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase)[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
_snake_case : str = images[0, 253:256, 253:256, -1]
_snake_case : Any = jnp.asarray(jax.device_get(image_slice.flatten()))
_snake_case : Optional[Any] = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297])
print(F'''output_slice: {output_slice}''')
assert jnp.abs(output_slice - expected_slice).max() < 1E-2
| 317 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : str = field(default="""question-answering-extractive""" ,metadata={"""include_in_asdict_even_if_is_default""": True} )
snake_case_ : ClassVar[Features] = Features({"""question""": Value("""string""" ), """context""": Value("""string""" )} )
snake_case_ : ClassVar[Features] = Features(
{
"""answers""": Sequence(
{
"""text""": Value("""string""" ),
"""answer_start""": Value("""int32""" ),
} )
} )
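# answer_start holds the character offset of each answer text within context (SQuAD convention).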
snake_case_ : str = "question"
snake_case_ : str = "context"
snake_case_ : str = "answers"
@property
def UpperCamelCase_ ( self : Any) -> Dict[str, str]:
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 317 | 1 |
def __UpperCamelCase ( _A : str = "The quick brown fox jumps over the lazy dog" , ) ->bool:
"""simple docstring"""
lowerCamelCase_ =set()
# Replace all the whitespace in our sentence
lowerCamelCase_ =input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(_A ) == 26
def __UpperCamelCase ( _A : str = "The quick brown fox jumps over the lazy dog" , ) ->bool:
"""simple docstring"""
lowerCamelCase_ =[False] * 26
for char in input_str:
if char.islower():
lowerCamelCase_ =True
elif char.isupper():
lowerCamelCase_ =True
return all(_A )
def __UpperCamelCase ( _A : str = "The quick brown fox jumps over the lazy dog" , ) ->bool:
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
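# All three variants are O(n): the first collects seen letters in a set, the second
# flips a per-letter flag, and the third counts distinct alphabetic characters directly.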
def __UpperCamelCase ( ) ->None:
"""simple docstring"""
from timeit import timeit
lowerCamelCase_ ="""from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
print(timeit("""is_pangram()""" , setup=_A ) )
print(timeit("""is_pangram_faster()""" , setup=_A ) )
print(timeit("""is_pangram_fastest()""" , setup=_A ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 49 |
import numpy as np
import qiskit
def __UpperCamelCase ( _A : int = 8 , _A : int | None = None ) ->str:
"""simple docstring"""
lowerCamelCase_ =np.random.default_rng(seed=_A )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
lowerCamelCase_ =6 * key_len
# Basis in which Alice encodes her qubits.
lowerCamelCase_ =rng.integers(2 , size=_A )
# The set of states Alice will prepare.
lowerCamelCase_ =rng.integers(2 , size=_A )
# Measurement basis for Bob's qubits.
lowerCamelCase_ =rng.integers(2 , size=_A )
# Quantum Circuit to simulate BB84
lowerCamelCase_ =qiskit.QuantumCircuit(_A , name="""BB84""" )
# Alice prepares her qubits according to rules above.
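# Basis bit 0 means the computational (Z) basis, basis bit 1 the diagonal (X) basis:
# X flips |0> to |1> when the state bit is 1, and H rotates into the diagonal basis.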
for index, _ in enumerate(_A ):
if alice_state[index] == 1:
bbaa_circ.x(_A )
if alice_basis[index] == 1:
bbaa_circ.h(_A )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
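# Applying H before a computational-basis measurement is equivalent to measuring
# in the diagonal (X) basis.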
for index, _ in enumerate(_A ):
if bob_basis[index] == 1:
bbaa_circ.h(_A )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
lowerCamelCase_ =qiskit.Aer.get_backend("""aer_simulator""" )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
lowerCamelCase_ =qiskit.execute(_A , _A , shots=1 , seed_simulator=_A )
# Returns the result of measurement.
lowerCamelCase_ =job.result().get_counts(_A ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
lowerCamelCase_ ="""""".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
_A , _A , _A )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
lowerCamelCase_ =gen_key[:key_len] if len(_A ) >= key_len else gen_key.ljust(_A , """0""" )
return key
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
| 49 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowercase__ :List[str] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Union[str, Any] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
lowercase__ :List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 101 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__=None ) -> Optional[int]:
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
_SCREAMING_SNAKE_CASE = nn.Parameter(snake_case__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
_SCREAMING_SNAKE_CASE = nn.Parameter(snake_case__ )
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = np.asarray(weights[0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[2] )
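# Trax stores the attention projections per head (presumably shaped (heads, d_model, d_head));
# the transpose + view calls below fold them into the flat 2D matrices torch expects.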
set_param(
torch_layer.self_attention.query_key ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.value ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.output.dense ,torch.tensor(snake_case__ ).view(-1 ,snake_case__ ).contiguous().transpose(0 ,1 ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = np.asarray(weights[0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[2] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.key ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.self_attention.value ,torch.tensor(snake_case__ ).transpose(1 ,2 ).contiguous().view(-1 ,snake_case__ ) ,)
set_param(
torch_layer.output.dense ,torch.tensor(snake_case__ ).view(-1 ,snake_case__ ).contiguous().transpose(0 ,1 ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = weights[0][0][0]
_SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[0] )
_SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# lsh weights + output
_SCREAMING_SNAKE_CASE = weights[0][1]
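# LSH attention ties query and key into a single projection, so it carries three
# weight tensors (qk, v, out); local attention keeps four (q, k, v, out),
# hence the len() < 4 dispatch below.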
if len(snake_case__ ) < 4:
set_layer_weights_in_torch_lsh(snake_case__ ,torch_block.attention ,snake_case__ )
else:
set_layer_weights_in_torch_local(snake_case__ ,torch_block.attention ,snake_case__ )
# intermediate weights
_SCREAMING_SNAKE_CASE = weights[2][0][1][2]
# Chunked Feed Forward
if len(snake_case__ ) == 4:
_SCREAMING_SNAKE_CASE = intermediate_weights[2]
# layernorm 2
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# intermediate dense
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
# intermediate out
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][0] )
_SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = torch_model.reformer
# word embeds
_SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings ,torch.tensor(snake_case__ ) ,)
if isinstance(weights[3] ,snake_case__ ):
_SCREAMING_SNAKE_CASE = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_SCREAMING_SNAKE_CASE = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'{position_embeddings[emb_idx]} emb does not match'
_SCREAMING_SNAKE_CASE = nn.Parameter(torch.tensor(snake_case__ ) )
_SCREAMING_SNAKE_CASE = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
snake_case__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_SCREAMING_SNAKE_CASE = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(snake_case__ ,snake_case__ ,snake_case__ )
# output layer norm
_SCREAMING_SNAKE_CASE = np.asarray(weights[7][0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm ,torch.tensor(snake_case__ ) ,torch.tensor(snake_case__ ) ,)
# output embeddings
_SCREAMING_SNAKE_CASE = np.asarray(weights[9][0] )
_SCREAMING_SNAKE_CASE = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder ,torch.tensor(snake_case__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(snake_case__ ) ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ReformerConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
_SCREAMING_SNAKE_CASE = ReformerModelWithLMHead(snake_case__ )
with open(snake_case__ ,"""rb""" ) as f:
_SCREAMING_SNAKE_CASE = pickle.load(snake_case__ )["""weights"""]
set_model_weights_in_torch(snake_case__ ,snake_case__ ,config.hidden_size )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() ,snake_case__ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 306 | 0 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
a_ : List[Any] = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_28,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class __UpperCamelCase ( unittest.TestCase ):
@classmethod
def lowercase__ ( cls ):
"""simple docstring"""
lowerCamelCase_ =TOKEN
HfFolder.save_token(lowerCAmelCase )
@classmethod
def lowercase__ ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token, repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
config.push_to_hub('''test-config''', use_auth_token=self._token )
lowerCamelCase_ =BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase, getattr(lowerCAmelCase, lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token, repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase, repo_id='''test-config''', push_to_hub=lowerCAmelCase, use_auth_token=self._token )
lowerCamelCase_ =BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase, getattr(lowerCAmelCase, lowerCAmelCase ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
config.push_to_hub('''valid_org/test-config-org''', use_auth_token=self._token )
lowerCamelCase_ =BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase, getattr(lowerCAmelCase, lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token, repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCAmelCase, repo_id='''valid_org/test-config-org''', push_to_hub=lowerCAmelCase, use_auth_token=self._token )
lowerCamelCase_ =BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase, getattr(lowerCAmelCase, lowerCAmelCase ) )
def lowercase__ ( self ):
"""simple docstring"""
CustomConfig.register_for_auto_class()
lowerCamelCase_ =CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
lowerCamelCase_ =AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCAmelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, '''CustomConfig''' )
self.assertEqual(new_config.attribute, 42 )
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase_ =c.n_embd + 1 # int
lowerCamelCase_ =c.resid_pdrop + 1.0 # float
lowerCamelCase_ =not c.scale_attn_weights # bool
lowerCamelCase_ =c.summary_type + '''foo''' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
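# update_from_string splits the comma-separated key=value pairs and casts each value
# to the type of the existing attribute, so all four kinds above round-trip in one call.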
self.assertEqual(lowerCAmelCase, c.n_embd, '''mismatch for key: n_embd''' )
self.assertEqual(lowerCAmelCase, c.resid_pdrop, '''mismatch for key: resid_pdrop''' )
self.assertEqual(lowerCAmelCase, c.scale_attn_weights, '''mismatch for key: scale_attn_weights''' )
self.assertEqual(lowerCAmelCase, c.summary_type, '''mismatch for key: summary_type''' )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =PretrainedConfig()
lowerCamelCase_ =[key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCAmelCase, ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
lowerCamelCase_ =[key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase, lowerCAmelCase )]
if len(lowerCAmelCase ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs`; pick another value for them:'''
f''' {', '.join(lowerCAmelCase )}.''' )
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaises(lowerCAmelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase_ =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
lowerCamelCase_ =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''', subfolder='''bert''' )
self.assertIsNotNone(lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =mock.Mock()
lowerCamelCase_ =500
lowerCamelCase_ ={}
lowerCamelCase_ =HTTPError
lowerCamelCase_ ={}
# Download this model to make sure it's in the cache.
lowerCamelCase_ =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''', return_value=lowerCAmelCase ) as mock_head:
lowerCamelCase_ =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Check that we actually called the fake head request
mock_head.assert_called()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =AutoConfig.from_pretrained('''bert-base-cased''' )
lowerCamelCase_ =['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCAmelCase )
lowerCamelCase_ =2
json.dump(configuration.to_dict(), open(os.path.join(lowerCAmelCase, '''config.4.0.0.json''' ), '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase_ =['''config.42.0.0.json''']
lowerCamelCase_ =768
configuration.save_pretrained(lowerCAmelCase )
shutil.move(os.path.join(lowerCAmelCase, '''config.4.0.0.json''' ), os.path.join(lowerCAmelCase, '''config.42.0.0.json''' ) )
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertEqual(new_configuration.hidden_size, 768 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
lowerCamelCase_ ='''v4.0.0'''
lowerCamelCase_, lowerCamelCase_ =new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCAmelCase, return_unused_kwargs=lowerCAmelCase )
self.assertEqual(new_configuration.hidden_size, 2 )
# This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCAmelCase, {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowerCamelCase_ ='''v3.0.0'''
lowerCamelCase_ =old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase )
self.assertEqual(old_configuration.hidden_size, 768 )
| 6 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : str =['speech']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''speech'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Any =['speech']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''speech'''] )
| 6 | 1 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowercase : List[str] = {
"""gwf-440k""": {
"""url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""",
"""sample_rate""": 48000,
"""sample_size""": 65536,
},
"""jmann-small-190k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""",
"""sample_rate""": 48000,
"""sample_size""": 65536,
},
"""jmann-large-580k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""",
"""sample_rate""": 48000,
"""sample_size""": 131072,
},
"""maestro-uncond-150k""": {
"""url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""",
"""sample_rate""": 16000,
"""sample_size""": 65536,
},
"""unlocked-uncond-250k""": {
"""url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""",
"""sample_rate""": 16000,
"""sample_size""": 65536,
},
"""honk-140k""": {
"""url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""",
"""sample_rate""": 16000,
"""sample_size""": 65536,
},
}
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
return torch.atana(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) / math.pi * 2
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
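# The "crash" schedule: sigma ramps as sin(t*pi/2)^2, alpha = sqrt(1 - sigma^2),
# and atan2(sigma, alpha) * 2/pi maps the pair back onto the model's t parameterization.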
lowercase : str = torch.sin(t * math.pi / 2 ) ** 2
lowercase : Any = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
class __snake_case ( lowerCAmelCase ):
pass
class __snake_case ( nn.Module ):
def __init__( self ,snake_case ):
'''simple docstring'''
super().__init__()
lowercase : List[str] = DiffusionAttnUnetaD(snake_case ,n_attn_layers=4 )
lowercase : int = deepcopy(self.diffusion )
lowercase : Dict = torch.quasirandom.SobolEngine(1 ,scramble=snake_case )
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
lowercase : Tuple = MODELS_MAP[model_name]["""url"""]
os.system(f"wget {url} ./" )
return f"./{model_name}.ckpt"
lowercase : Any = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
}
lowercase : Union[str, Any] = {
"""8""": """resnets.0""",
"""9""": """attentions.0""",
"""10""": """resnets.1""",
"""11""": """attentions.1""",
"""12""": """resnets.2""",
"""13""": """attentions.2""",
}
lowercase : str = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
"""8""": """resnets.3""",
"""9""": """attentions.3""",
"""10""": """resnets.4""",
"""11""": """attentions.4""",
"""12""": """resnets.5""",
"""13""": """attentions.5""",
}
lowercase : int = {
"""0""": """resnets.0""",
"""1""": """resnets.1""",
"""2""": """resnets.2""",
"""4""": """resnets.0""",
"""5""": """resnets.1""",
"""6""": """resnets.2""",
}
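# These lookup tables translate the checkpoint's flat, numbered layer indices into
# diffusers down_blocks / mid_block / up_blocks module paths.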
lowercase : str = {
"""skip""": """conv_skip""",
"""main.0""": """conv_1""",
"""main.1""": """group_norm_1""",
"""main.3""": """conv_2""",
"""main.4""": """group_norm_2""",
}
lowercase : Any = {
"""norm""": """group_norm""",
"""qkv_proj""": ["""query""", """key""", """value"""],
"""out_proj""": ["""proj_attn"""],
}
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
if name.startswith("""skip""" ):
return name.replace("""skip""" , RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(f"ResConvBlock error with {name}" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Any:
for key, value in ATTN_MAP.items():
if name.startswith(SCREAMING_SNAKE_CASE__ ) and not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return name.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif name.startswith(SCREAMING_SNAKE_CASE__ ):
return [name.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for v in value]
raise ValueError(f"Attn error with {name}" )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 ) -> Tuple:
lowercase : Optional[int] = input_string
if string.split(""".""" )[0] == "timestep_embed":
return string.replace("""timestep_embed""" , """time_proj""" )
lowercase : Optional[int] = 0
if string.startswith("""net.3.""" ):
depth += 1
lowercase : List[str] = string[6:]
elif string.startswith("""net.""" ):
lowercase : str = string[4:]
while string.startswith("""main.7.""" ):
depth += 1
lowercase : Union[str, Any] = string[7:]
if string.startswith("""main.""" ):
lowercase : List[str] = string[5:]
# mid block
if string[:2].isdigit():
lowercase : List[str] = string[:2]
lowercase : Tuple = string[2:]
else:
lowercase : Optional[Any] = string[0]
lowercase : Any = string[1:]
if depth == max_depth:
lowercase : str = MID_NUM_TO_LAYER[layer_num]
lowercase : Dict = """mid_block"""
elif depth > 0 and int(SCREAMING_SNAKE_CASE__ ) < 7:
lowercase : str = DOWN_NUM_TO_LAYER[layer_num]
lowercase : int = f"down_blocks.{depth}"
elif depth > 0 and int(SCREAMING_SNAKE_CASE__ ) > 7:
lowercase : Dict = UP_NUM_TO_LAYER[layer_num]
lowercase : Any = f"up_blocks.{max_depth - depth - 1}"
elif depth == 0:
lowercase : Dict = DEPTH_0_TO_LAYER[layer_num]
lowercase : str = f"up_blocks.{max_depth - 1}" if int(SCREAMING_SNAKE_CASE__ ) > 3 else """down_blocks.0"""
if not string_left.startswith(""".""" ):
raise ValueError(f"Naming error with {input_string} and string_left: {string_left}." )
lowercase : Any = string_left[1:]
if "resnets" in new_layer:
lowercase : Dict = convert_resconv_naming(SCREAMING_SNAKE_CASE__ )
elif "attentions" in new_layer:
lowercase : List[Any] = convert_attn_naming(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = new_string_left
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = prefix + """.""" + new_layer + """.""" + string_left
else:
lowercase : int = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
return new_string
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
lowercase : Optional[Any] = {}
for k, v in state_dict.items():
if k.endswith("""kernel""" ):
# up- and downsample layers don't have trainable weights
continue
lowercase : List[str] = rename(SCREAMING_SNAKE_CASE__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = transform_conv_attns(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
lowercase : Optional[int] = v
return new_state_dict
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
if len(SCREAMING_SNAKE_CASE__ ) == 1:
if len(v.shape ) == 3:
# weight
lowercase : Optional[Any] = v[:, :, 0]
else:
# bias
lowercase : Tuple = v
else:
# qkv matrices
lowercase : Tuple = v.shape[0]
lowercase : Union[str, Any] = trippled_shape // 3
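# The fused qkv matrix stacks its rows as [q; k; v], so each third is one projection.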
for i in range(3 ):
if len(v.shape ) == 3:
lowercase : Union[str, Any] = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
lowercase : List[str] = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> List[str]:
lowercase : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
lowercase : List[Any] = args.model_path.split("""/""" )[-1].split(""".""" )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
lowercase : List[str] = download(SCREAMING_SNAKE_CASE__ )
lowercase : Any = MODELS_MAP[model_name]["""sample_rate"""]
lowercase : str = MODELS_MAP[model_name]["""sample_size"""]
lowercase : List[Any] = Object()
lowercase : str = sample_size
lowercase : List[str] = sample_rate
lowercase : Any = 0
lowercase : Dict = UNetaDModel(sample_size=SCREAMING_SNAKE_CASE__ , sample_rate=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = diffusers_model.state_dict()
lowercase : Optional[int] = DiffusionUncond(SCREAMING_SNAKE_CASE__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=SCREAMING_SNAKE_CASE__ )["""state_dict"""] )
lowercase : Any = orig_model.diffusion_ema.eval()
lowercase : int = orig_model.state_dict()
lowercase : int = rename_orig_weights(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
lowercase : Tuple = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(SCREAMING_SNAKE_CASE__ ) == 0, f"Problem with {renamed_minus_diffusers}"
assert all(k.endswith("""kernel""" ) for k in list(SCREAMING_SNAKE_CASE__ ) ), f"Problem with {diffusers_minus_renamed}"
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
if key == "time_proj.weight":
lowercase : str = value.squeeze()
lowercase : int = value
diffusers_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = 100
lowercase : int = 33
lowercase : Tuple = IPNDMScheduler(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
lowercase : str = torch.randn([1, 2, config.sample_size] , generator=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = torch.linspace(1 , 0 , steps + 1 , device=SCREAMING_SNAKE_CASE__ )[:-1]
lowercase : List[Any] = get_crash_schedule(SCREAMING_SNAKE_CASE__ )
lowercase : Any = DanceDiffusionPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = torch.manual_seed(33 )
lowercase : List[str] = pipe(num_inference_steps=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).audios
lowercase : int = sampling.iplms_sample(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , {} )
lowercase : str = generated.clamp(-1 , 1 )
lowercase : Any = (generated - audio).abs().sum()
lowercase : Dict = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("""Diff sum""" , SCREAMING_SNAKE_CASE__ )
print("""Diff max""" , SCREAMING_SNAKE_CASE__ )
assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
print(f"Conversion for {model_name} successful!" )
if __name__ == "__main__":
lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
lowercase : Optional[int] = parser.parse_args()
main(args)
| 20 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : Any = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class __snake_case ( lowerCAmelCase ):
_a : Union[str, Any]= "visual_bert"
def __init__( self ,snake_case=30522 ,snake_case=768 ,snake_case=512 ,snake_case=12 ,snake_case=12 ,snake_case=3072 ,snake_case="gelu" ,snake_case=0.1 ,snake_case=0.1 ,snake_case=512 ,snake_case=2 ,snake_case=0.02 ,snake_case=1e-12 ,snake_case=False ,snake_case=True ,snake_case=1 ,snake_case=0 ,snake_case=2 ,**snake_case ,):
'''simple docstring'''
super().__init__(pad_token_id=snake_case ,bos_token_id=snake_case ,eos_token_id=snake_case ,**snake_case )
lowercase : Tuple = vocab_size
lowercase : int = max_position_embeddings
lowercase : Optional[Any] = hidden_size
lowercase : int = visual_embedding_dim
lowercase : Tuple = num_hidden_layers
lowercase : str = num_attention_heads
lowercase : Optional[Any] = intermediate_size
lowercase : str = hidden_act
lowercase : Tuple = hidden_dropout_prob
lowercase : List[Any] = attention_probs_dropout_prob
lowercase : Union[str, Any] = initializer_range
lowercase : int = type_vocab_size
lowercase : Union[str, Any] = layer_norm_eps
lowercase : Union[str, Any] = bypass_transformer
lowercase : int = special_visual_initialize
| 20 | 1 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def A ( _lowercase , _lowercase = "cpu" , _lowercase = None ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.load(_lowercase , map_location=_lowercase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_lowercase , torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
SCREAMING_SNAKE_CASE : List[Any] = v.half()
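# .half() casts the tensor to float16, roughly halving the checkpoint size on disk.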
if save_path is None: # overwrite src_path
SCREAMING_SNAKE_CASE : str = src_path
torch.save(_lowercase , _lowercase )
if __name__ == "__main__":
fire.Fire(convert)
| 258 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : Dict = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
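# The try/except blocks below register each backend's classes only when the
# corresponding framework (torch / TF / Flax) is actually importable.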
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 258 | 1 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Any = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 37 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = analyze_text(UpperCamelCase )
lowerCAmelCase__ : Optional[int] = list(""" """ + ascii_lowercase )
# Total character count, used to turn frequencies into probabilities.
lowerCAmelCase__ : List[Any] = sum(single_char_strings.values() )
# Entropy over single-character strings.
lowerCAmelCase__ : Optional[int] = 0
# For each letter of the alphabet, look up its count and accumulate its entropy contribution.
for ch in my_alphas:
if ch in single_char_strings:
lowerCAmelCase__ : List[Any] = single_char_strings[ch]
lowerCAmelCase__ : List[Any] = my_str / all_sum
my_fir_sum += prob * math.loga(UpperCamelCase ) # entropy formula.
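# i.e. H = -sum(p * log2(p)) over the alphabet; the sign is flipped when printed below.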
# print entropy
print(f"""{round(-1 * my_fir_sum ):.1f}""" )
# Entropy over two-character strings.
lowerCAmelCase__ : Dict = sum(two_char_strings.values() )
lowerCAmelCase__ : int = 0
# For each two-letter sequence, accumulate its entropy contribution.
for cha in my_alphas:
for cha in my_alphas:
lowerCAmelCase__ : Union[str, Any] = cha + cha
if sequence in two_char_strings:
lowerCAmelCase__ : Dict = two_char_strings[sequence]
lowerCAmelCase__ : Tuple = int(UpperCamelCase ) / all_sum
my_sec_sum += prob * math.loga(UpperCamelCase )
# print second entropy
print(f"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = Counter() # type: ignore
lowerCAmelCase__ : Tuple = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# First pair: treat the text as if preceded by a space.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(UpperCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 37 | 1 |
'''simple docstring'''
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase_ = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any] ) -> List[str]:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__A )
def SCREAMING_SNAKE_CASE_ ( __A : str ) -> str:
from transformers.testing_utils import pytest_terminal_summary_main
_SCREAMING_SNAKE_CASE = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(__A , id=__A )
| 363 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowerCamelCase_ = '<<<<<<< This should probably be modified because it mentions: '
lowerCamelCase_ = '=======\n>>>>>>>\n'
lowerCamelCase_ = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
lowerCamelCase_ = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
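# Order matters: the specific tfds.features.Text rules must fire before the generic
# `tfds.` -> `datasets.` catch-all, or the prefix would be rewritten out from under them.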
def SCREAMING_SNAKE_CASE_ ( __A : Namespace ) -> List[Any]:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class lowercase_ ( A ):
"""simple docstring"""
@staticmethod
def lowerCAmelCase_ ( __lowerCamelCase : ArgumentParser ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = parser.add_parser(
"convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
train_parser.add_argument(
"--tfds_path" , type=__lowerCamelCase , required=__lowerCamelCase , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
train_parser.add_argument(
"--datasets_directory" , type=__lowerCamelCase , required=__lowerCamelCase , help="Path to the HuggingFace Datasets folder." )
train_parser.set_defaults(func=__lowerCamelCase )
def __init__( self : Dict , __lowerCamelCase : str , __lowerCamelCase : str , *__lowerCamelCase : Tuple ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = get_logger("datasets-cli/converting" )
_SCREAMING_SNAKE_CASE = tfds_path
_SCREAMING_SNAKE_CASE = datasets_directory
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
if os.path.isdir(self._tfds_path ):
_SCREAMING_SNAKE_CASE = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
_SCREAMING_SNAKE_CASE = os.path.dirname(self._tfds_path )
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
_SCREAMING_SNAKE_CASE = os.path.abspath(self._datasets_directory )
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = {}
if os.path.isdir(self._tfds_path ):
_SCREAMING_SNAKE_CASE = os.listdir(__lowerCamelCase )
else:
_SCREAMING_SNAKE_CASE = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""" )
_SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , __lowerCamelCase )
if not os.path.isfile(__lowerCamelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file" )
continue
with open(__lowerCamelCase , encoding="utf-8" ) as f:
_SCREAMING_SNAKE_CASE = f.readlines()
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = []
for line in lines:
_SCREAMING_SNAKE_CASE = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
_SCREAMING_SNAKE_CASE = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
_SCREAMING_SNAKE_CASE = ""
continue
elif "from absl import logging" in out_line:
_SCREAMING_SNAKE_CASE = "from datasets import logging\n"
elif "getLogger" in out_line:
_SCREAMING_SNAKE_CASE = out_line.replace("getLogger" , "get_logger" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = list(filter(lambda __lowerCamelCase : e in out_line , __lowerCamelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowerCamelCase ) + "\n" )
out_lines.append(__lowerCamelCase )
out_lines.append(__lowerCamelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
_SCREAMING_SNAKE_CASE = re.sub(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Take care of saving utilities (to later move them together with the main script)
if "tensorflow_datasets" in out_line:
_SCREAMING_SNAKE_CASE = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , __lowerCamelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
_SCREAMING_SNAKE_CASE = "from . import " + match.group(1 )
# Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
_SCREAMING_SNAKE_CASE = True
out_lines.append(__lowerCamelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
_SCREAMING_SNAKE_CASE = f_name.replace(".py" , "" )
_SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , __lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
self._logger.info(F"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(__lowerCamelCase )
if needs_manual_update:
with_manual_update.append(__lowerCamelCase )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.writelines(__lowerCamelCase )
self._logger.info(F"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
_SCREAMING_SNAKE_CASE = os.path.basename(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = imports_to_builder_map[f_name.replace(".py" , "" )]
self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(__lowerCamelCase , __lowerCamelCase )
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 111 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version('''4.17.0.dev0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
lowercase__ : Dict = logging.getLogger(__name__)
@dataclass
class lowercase_ :
"""simple docstring"""
UpperCAmelCase_ : Optional[str] = field(
default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
UpperCAmelCase_ : Optional[str] = field(
default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
UpperCAmelCase_ : int = field(
default=1024 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCAmelCase_ : bool = field(
default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
UpperCAmelCase_ : bool = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
UpperCAmelCase_ : Optional[int] = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCAmelCase_ : Optional[int] = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
UpperCAmelCase_ : Optional[int] = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
UpperCAmelCase_ : Optional[str] = field(
default=UpperCamelCase_ , metadata={"""help""": """A csv or a json file containing the training data."""} )
UpperCAmelCase_ : Optional[str] = field(
default=UpperCamelCase_ , metadata={"""help""": """A csv or a json file containing the validation data."""} )
UpperCAmelCase_ : Optional[str] = field(default=UpperCamelCase_ , metadata={"""help""": """A csv or a json file containing the test data."""} )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
lowerCAmelCase = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowerCAmelCase = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class lowercase_ :
"""simple docstring"""
UpperCAmelCase_ : str = field(
default=UpperCamelCase_ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
UpperCAmelCase_ : Optional[str] = field(
default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCAmelCase_ : Optional[str] = field(
default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCAmelCase_ : Optional[str] = field(
default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
UpperCAmelCase_ : bool = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
UpperCAmelCase_ : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCAmelCase_ : bool = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def SCREAMING_SNAKE_CASE_ ( ) -> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
lowerCAmelCase = training_args.get_process_log_level()
logger.setLevel(snake_case__ )
datasets.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
    # For JSON files, this script will use the `statement` column for the input statement and the `table_text` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCAmelCase = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCAmelCase = data_args.train_file.split('''.''' )[-1]
lowerCAmelCase = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCAmelCase = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCAmelCase = load_dataset('''csv''' , data_files=snake_case__ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCAmelCase = load_dataset('''json''' , data_files=snake_case__ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCAmelCase = raw_datasets['''train'''].features['''label'''].names
lowerCAmelCase = len(snake_case__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=snake_case__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
lowerCAmelCase = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=snake_case__ , )
lowerCAmelCase = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
lowerCAmelCase = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCAmelCase = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCAmelCase = {'''Refused''': 0, '''Entailed''': 1}
lowerCAmelCase = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(snake_case__ ):
# Tokenize the texts
def _convert_table_text_to_pandas(snake_case__ ):
lowerCAmelCase = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowerCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
lowerCAmelCase = examples['''statement''']
lowerCAmelCase = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
lowerCAmelCase = tokenizer(snake_case__ , snake_case__ , padding=snake_case__ , max_length=snake_case__ , truncation=snake_case__ )
lowerCAmelCase = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowerCAmelCase = raw_datasets.map(
snake_case__ , batched=snake_case__ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCAmelCase = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCAmelCase = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCAmelCase = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowerCAmelCase = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowerCAmelCase = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(snake_case__ ) ) , 3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(snake_case__ ):
lowerCAmelCase = p.predictions[0] if isinstance(p.predictions , snake_case__ ) else p.predictions
lowerCAmelCase = np.argmax(snake_case__ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCAmelCase = default_data_collator
elif training_args.fpaa:
lowerCAmelCase = DataCollatorWithPadding(snake_case__ , pad_to_multiple_of=8 )
else:
lowerCAmelCase = None
# Initialize our Trainer
lowerCAmelCase = Trainer(
model=snake_case__ , args=snake_case__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=snake_case__ , tokenizer=snake_case__ , data_collator=snake_case__ , )
# Training
if training_args.do_train:
lowerCAmelCase = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase = last_checkpoint
lowerCAmelCase = trainer.train(resume_from_checkpoint=snake_case__ )
lowerCAmelCase = train_result.metrics
lowerCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case__ )
)
lowerCAmelCase = min(snake_case__ , len(snake_case__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , snake_case__ )
trainer.save_metrics('''train''' , snake_case__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCAmelCase = trainer.evaluate(eval_dataset=snake_case__ )
lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(snake_case__ )
lowerCAmelCase = min(snake_case__ , len(snake_case__ ) )
trainer.log_metrics('''eval''' , snake_case__ )
trainer.save_metrics('''eval''' , snake_case__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
lowerCAmelCase = predict_dataset.remove_columns('''label''' )
lowerCAmelCase = trainer.predict(snake_case__ , metric_key_prefix='''predict''' ).predictions
lowerCAmelCase = np.argmax(snake_case__ , axis=1 )
lowerCAmelCase = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(snake_case__ , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(snake_case__ ):
lowerCAmelCase = label_list[item]
writer.write(f"{index}\t{item}\n" )
lowerCAmelCase = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case__ )
else:
trainer.create_model_card(**snake_case__ )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
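# Standalone sketch of the '#'-delimited table parsing done inside
# preprocess_tabfact_function above; `table_text_to_pandas` is a hypothetical
# name and the sample table string is made up for illustration.
import pandas as pd

def table_text_to_pandas(table_text: str) -> pd.DataFrame:
    # rows are newline-separated, cells are '#'-separated; first row is the header
    rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])

print(table_text_to_pandas("year#city\n2000#sydney\n2004#athens"))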
| 338 |
from collections import defaultdict
from math import ceil, sqrt
def SCREAMING_SNAKE_CASE_ ( snake_case__ = 1_0_0_0_0_0_0 , snake_case__ = 1_0 ) -> int:
    # Project Euler 174: a hollow square lamina with outer width w and hole width h
    # uses t = w*w - h*h tiles; count the tile totals t <= t_limit that arise in 1 to 10 ways.
    lowerCAmelCase = defaultdict(snake_case__ )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            lowerCAmelCase = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            lowerCAmelCase = 1
        # the hole width must share the parity of the outer width
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(snake_case__ , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= 1_0 )
if __name__ == "__main__":
print(f'{solution() = }')
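# A brute-force cross-check of the lamina count for small limits. This is a
# hypothetical helper, not part of the original solution: it enumerates outer and
# hole widths directly instead of deriving the hole lower bound analytically.
def brute_force_laminae(t_limit: int = 1_000, n_limit: int = 10) -> int:
    from collections import Counter

    counts = Counter()
    for outer in range(3, t_limit // 4 + 2):
        for hole in range(outer - 2, 0, -2):  # same parity as the outer width
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break  # the tile count only grows as the hole shrinks
            counts[tiles] += 1
    return sum(1 for n in counts.values() if 1 <= n <= n_limit)

print(f"{brute_force_laminae() = }")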
| 338 | 1 |
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
A : List[Any] = '''src/transformers'''
# Matches is_xxx_available()
A : Any = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
A : Union[str, Any] = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
A : Tuple = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
A : Any = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
A : Tuple = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
A : Tuple = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
A : List[str] = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
A : str = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
A : Optional[Any] = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
A : Union[str, Any] = re.compile(R'''^\s*try:''')
# Catches a line with else:
A : str = re.compile(R'''^\s*else:''')
def __lowerCamelCase ( __a :List[str] ) -> Any:
"""simple docstring"""
if _re_test_backend.search(__a ) is None:
return None
A__ = [b[0] for b in _re_backend.findall(__a )]
backends.sort()
return "_and_".join(__a )
def __lowerCamelCase ( __a :Tuple ) -> List[Any]:
"""simple docstring"""
with open(__a , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A__ = f.readlines()
A__ = 0
while line_index < len(__a ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__a ):
return None
# First grab the objects without a specific backend in _import_structure
A__ = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
A__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__a ):
A__ = _re_one_line_import_struct.search(__a ).groups()[0]
A__ = re.findall(R"""\[([^\]]+)\]""" , __a )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
A__ = _re_import_struct_key_value.search(__a )
if single_line_import_search is not None:
A__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(__a ) > 0]
objects.extend(__a )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
A__ = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
A__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
A__ = lines[line_index]
if _re_import_struct_add_one.search(__a ) is not None:
objects.append(_re_import_struct_add_one.search(__a ).groups()[0] )
elif _re_import_struct_add_many.search(__a ) is not None:
A__ = _re_import_struct_add_many.search(__a ).groups()[0].split(""", """ )
A__ = [obj[1:-1] for obj in imports if len(__a ) > 0]
objects.extend(__a )
elif _re_between_brackets.search(__a ) is not None:
A__ = _re_between_brackets.search(__a ).groups()[0].split(""", """ )
A__ = [obj[1:-1] for obj in imports if len(__a ) > 0]
objects.extend(__a )
elif _re_quote_object.search(__a ) is not None:
objects.append(_re_quote_object.search(__a ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 1_2 + """\"""" ):
objects.append(line[1_3:-3] )
line_index += 1
A__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
A__ = []
while (
line_index < len(__a )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
A__ = lines[line_index]
A__ = _re_import.search(__a )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
A__ = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(__a ):
# If the line is an if is_backend_available, we grab all objects associated.
A__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
A__ = lines[line_index]
A__ = _re_import.search(__a )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 1_2 ):
objects.append(line[1_2:-2] )
line_index += 1
A__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __lowerCamelCase ( __a :Optional[int] , __a :str ) -> Tuple:
"""simple docstring"""
def find_duplicates(__a :List[Any] ):
return [k for k, v in collections.Counter(__a ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
A__ = []
for key in import_dict_objects.keys():
A__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
A__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
A__ = """base imports""" if key == """none""" else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
A__ = []
for root, _, files in os.walk(__a ):
if "__init__.py" in files:
A__ = os.path.join(__a , """__init__.py""" )
A__ = parse_init(__a )
if objects is not None:
A__ = analyze_results(*__a )
if len(__a ) > 0:
A__ = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append("""\n""".join(__a ) )
if len(__a ) > 0:
raise ValueError("""\n\n""".join(__a ) )
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
A__ = []
for path, directories, files in os.walk(__a ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(__a )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__a ) / folder).glob("""*.py""" ) ) ) == 0:
continue
A__ = str((Path(__a ) / folder).relative_to(__a ) )
A__ = short_path.replace(os.path.sep , """.""" )
submodules.append(__a )
for fname in files:
if fname == "__init__.py":
continue
A__ = str((Path(__a ) / fname).relative_to(__a ) )
A__ = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(__a )
return submodules
A : Optional[Any] = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
from transformers.utils import direct_transformers_import
A__ = direct_transformers_import(__a )
A__ = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
    # (potentially re-) add them.
with open(os.path.join(__a , """__init__.py""" ) , """r""" ) as f:
A__ = f.read()
import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , __a ) ) )
A__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(__a ) > 0:
A__ = """\n""".join(F'- {module}' for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registed in the main init of Transformers:\n"""
F'{list_of_modules}\n'
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
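# Toy illustration of the split-init layout the checker above validates: the
# `_import_structure` dict and the TYPE_CHECKING branch must declare exactly the
# same objects. The module and class names here are made up.
from typing import TYPE_CHECKING

_import_structure = {
    "configuration_foo": ["FooConfig"],
    "modeling_foo": ["FooModel"],
}

if TYPE_CHECKING:
    # never executed at runtime, so type checkers see real imports while
    # runtime imports stay lazy
    from .configuration_foo import FooConfig  # noqa: F401
    from .modeling_foo import FooModel  # noqa: F401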
| 352 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
A : List[str] = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
A : Tuple = parser.parse_args()
if args.model_type == "bert":
A : Dict = BertForMaskedLM.from_pretrained(args.model_name)
A : List[str] = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
A : Optional[Any] = model.state_dict()
A : int = {}
for w in ["word_embeddings", "position_embeddings"]:
A : str = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
A : Any = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
A : Tuple = 0
    # keep alternating teacher layers to initialize the 6-layer student
    for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
A : Optional[Any] = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
A : Optional[Any] = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
A : Optional[Any] = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
A : int = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
A : List[Any] = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
A : List[str] = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
A : Union[str, Any] = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
A : List[str] = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
A : int = state_dict['''cls.predictions.decoder.weight''']
A : str = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
A : List[Any] = state_dict[F'''cls.predictions.transform.dense.{w}''']
A : List[str] = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
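# Minimal sketch of the teacher-to-student key remapping performed above: each
# selected teacher layer is copied into a consecutively numbered student layer.
# The six-layer selection mirrors the script; the tensors here are dummies.
import torch

teacher_sd = {f"bert.encoder.layer.{i}.output.dense.weight": torch.zeros(1) for i in range(12)}
student_sd = {}
for std_idx, teacher_idx in enumerate([0, 2, 4, 7, 9, 11]):
    student_sd[f"bert.encoder.layer.{std_idx}.output.dense.weight"] = teacher_sd[
        f"bert.encoder.layer.{teacher_idx}.output.dense.weight"
    ]
print(sorted(student_sd))  # student keys 0..5, values taken from layers 0,2,4,7,9,11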
| 276 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __A( a ):
snake_case_ = (DDIMParallelScheduler,)
snake_case_ = (('''eta''', 0.0), ('''num_inference_steps''', 5_0))
def SCREAMING_SNAKE_CASE_ ( self , **_snake_case ) -> str:
'''simple docstring'''
__a = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**_snake_case )
return config
def SCREAMING_SNAKE_CASE_ ( self , **_snake_case ) -> str:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(**_snake_case )
__a = scheduler_class(**_snake_case )
__a , __a = 10, 0.0
__a = self.dummy_model()
__a = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case )
for t in scheduler.timesteps:
__a = model(_snake_case , _snake_case )
__a = scheduler.step(_snake_case , _snake_case , _snake_case , _snake_case ).prev_sample
return sample
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_snake_case )
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(steps_offset=1 )
__a = scheduler_class(**_snake_case )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
self.check_over_configs(thresholding=_snake_case )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , )
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=_snake_case , num_inference_steps=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=_snake_case , eta=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_4771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_2460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_snake_case )
__a , __a = 10, 0.0
scheduler.set_timesteps(_snake_case )
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = self.dummy_sample_deter + 0.1
__a = self.dummy_sample_deter - 0.1
__a = samplea.shape[0]
__a = torch.stack([samplea, samplea, samplea] , dim=0 )
__a = torch.arange(_snake_case )[0:3, None].repeat(1 , _snake_case )
__a = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__a = scheduler.batch_step_no_noise(_snake_case , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _snake_case )
__a = torch.sum(torch.abs(_snake_case ) )
__a = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.4982 ) < 1E-3
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
__a = self.full_loop()
__a = torch.sum(torch.abs(_snake_case ) )
__a = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 172.0067 ) < 1E-2
assert abs(result_mean.item() - 0.22_3967 ) < 1E-3
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = self.full_loop(prediction_type='''v_prediction''' )
__a = torch.sum(torch.abs(_snake_case ) )
__a = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 52.5302 ) < 1E-2
assert abs(result_mean.item() - 0.0684 ) < 1E-3
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = self.full_loop(set_alpha_to_one=_snake_case , beta_start=0.01 )
__a = torch.sum(torch.abs(_snake_case ) )
__a = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 149.8295 ) < 1E-2
assert abs(result_mean.item() - 0.1951 ) < 1E-3
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = self.full_loop(set_alpha_to_one=_snake_case , beta_start=0.01 )
__a = torch.sum(torch.abs(_snake_case ) )
__a = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 149.0784 ) < 1E-2
        assert abs(result_mean.item() - 0.1941 ) < 1E-3
| 6 |
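# A hedged usage sketch of the scheduler exercised by the test above: a plain
# sequential denoising loop with a zero "model" standing in for a UNet prediction.
# All shapes and config values are illustrative, not taken from the test.
import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1_000, beta_schedule="linear")
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # stand-in for a noise prediction
    sample = scheduler.step(model_output, t, sample).prev_sample
print(sample.shape)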
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : Optional[int] = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 6 | 1 |
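# Toy sketch of the lazy-import idea behind _LazyModule above: names resolve to
# real imports only on first attribute access, via the module-level __getattr__
# hook (PEP 562). The mapping uses stdlib modules purely for illustration.
import importlib

_lazy_map = {"sqrt": "math", "dumps": "json"}

def __getattr__(name):
    if name in _lazy_map:
        # import the owning module on demand and pull the attribute from it
        return getattr(importlib.import_module(_lazy_map[name]), name)
    raise AttributeError(name)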
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class __UpperCamelCase ( nn.Module ):
def __init__( self :Any ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int=0.0 ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :str = "geglu" ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = True ,_UpperCamelCase :str = "layer_norm" ,_UpperCamelCase :bool = False ,):
super().__init__()
snake_case_ : Any = only_cross_attention
snake_case_ : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
snake_case_ : Any = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
snake_case_ : Dict = AdaLayerNorm(_UpperCamelCase ,_UpperCamelCase )
elif self.use_ada_layer_norm_zero:
snake_case_ : str = AdaLayerNormZero(_UpperCamelCase ,_UpperCamelCase )
else:
snake_case_ : List[Any] = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase )
snake_case_ : List[str] = Attention(
query_dim=_UpperCamelCase ,heads=_UpperCamelCase ,dim_head=_UpperCamelCase ,dropout=_UpperCamelCase ,bias=_UpperCamelCase ,cross_attention_dim=cross_attention_dim if only_cross_attention else None ,upcast_attention=_UpperCamelCase ,)
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerNormZero would not make sense if returned during
# the second cross attention block.
snake_case_ : str = (
AdaLayerNorm(_UpperCamelCase ,_UpperCamelCase )
if self.use_ada_layer_norm
else nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase )
)
snake_case_ : List[str] = Attention(
query_dim=_UpperCamelCase ,cross_attention_dim=cross_attention_dim if not double_self_attention else None ,heads=_UpperCamelCase ,dim_head=_UpperCamelCase ,dropout=_UpperCamelCase ,bias=_UpperCamelCase ,upcast_attention=_UpperCamelCase ,) # is self-attn if encoder_hidden_states is none
else:
snake_case_ : Any = None
snake_case_ : Optional[Any] = None
# 3. Feed-forward
snake_case_ : List[str] = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase )
snake_case_ : Union[str, Any] = FeedForward(_UpperCamelCase ,dropout=_UpperCamelCase ,activation_fn=_UpperCamelCase ,final_dropout=_UpperCamelCase )
# let chunk size default to None
snake_case_ : Optional[int] = None
snake_case_ : Dict = 0
def a__ ( self :List[Any] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :int ):
# Sets chunk feed-forward
snake_case_ : Optional[Any] = chunk_size
snake_case_ : Optional[Any] = dim
def a__ ( self :List[str] ,_UpperCamelCase :torch.FloatTensor ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,_UpperCamelCase :Dict[str, Any] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
snake_case_ : Optional[Any] = self.norma(_UpperCamelCase ,_UpperCamelCase )
elif self.use_ada_layer_norm_zero:
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = self.norma(
_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hidden_dtype=hidden_states.dtype )
else:
snake_case_ : Optional[int] = self.norma(_UpperCamelCase )
snake_case_ : int = cross_attention_kwargs if cross_attention_kwargs is not None else {}
snake_case_ : Union[str, Any] = self.attna(
_UpperCamelCase ,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,)
if self.use_ada_layer_norm_zero:
snake_case_ : Union[str, Any] = gate_msa.unsqueeze(1 ) * attn_output
snake_case_ : Union[str, Any] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
snake_case_ : Any = (
self.norma(_UpperCamelCase ,_UpperCamelCase ) if self.use_ada_layer_norm else self.norma(_UpperCamelCase )
)
snake_case_ : List[Any] = self.attna(
_UpperCamelCase ,encoder_hidden_states=_UpperCamelCase ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,)
snake_case_ : Tuple = attn_output + hidden_states
# 3. Feed-forward
snake_case_ : Optional[Any] = self.norma(_UpperCamelCase )
if self.use_ada_layer_norm_zero:
snake_case_ : Dict = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
snake_case_ : Union[str, Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
snake_case_ : int = torch.cat(
[self.ff(_UpperCamelCase ) for hid_slice in norm_hidden_states.chunk(_UpperCamelCase ,dim=self._chunk_dim )] ,dim=self._chunk_dim ,)
else:
snake_case_ : List[str] = self.ff(_UpperCamelCase )
if self.use_ada_layer_norm_zero:
snake_case_ : Union[str, Any] = gate_mlp.unsqueeze(1 ) * ff_output
snake_case_ : Any = ff_output + hidden_states
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self :Dict ,_UpperCamelCase :int ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :int = 4 ,_UpperCamelCase :float = 0.0 ,_UpperCamelCase :str = "geglu" ,_UpperCamelCase :bool = False ,):
super().__init__()
snake_case_ : Tuple = int(dim * mult )
snake_case_ : Optional[int] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
snake_case_ : Any = GELU(_UpperCamelCase ,_UpperCamelCase )
if activation_fn == "gelu-approximate":
snake_case_ : Tuple = GELU(_UpperCamelCase ,_UpperCamelCase ,approximate="""tanh""" )
elif activation_fn == "geglu":
snake_case_ : Dict = GEGLU(_UpperCamelCase ,_UpperCamelCase )
elif activation_fn == "geglu-approximate":
snake_case_ : Optional[Any] = ApproximateGELU(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : Dict = nn.ModuleList([] )
# project in
self.net.append(_UpperCamelCase )
# project dropout
self.net.append(nn.Dropout(_UpperCamelCase ) )
# project out
self.net.append(nn.Linear(_UpperCamelCase ,_UpperCamelCase ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(_UpperCamelCase ) )
def a__ ( self :Tuple ,_UpperCamelCase :Union[str, Any] ):
for module in self.net:
snake_case_ : Tuple = module(_UpperCamelCase )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self :Optional[Any] ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :str = "none" ):
super().__init__()
snake_case_ : Union[str, Any] = nn.Linear(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : Optional[Any] = approximate
def a__ ( self :str ,_UpperCamelCase :int ):
if gate.device.type != "mps":
return F.gelu(_UpperCamelCase ,approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ,approximate=self.approximate ).to(dtype=gate.dtype )
def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[Any] ):
snake_case_ : Optional[Any] = self.proj(_UpperCamelCase )
snake_case_ : int = self.gelu(_UpperCamelCase )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self :List[Any] ,_UpperCamelCase :int ,_UpperCamelCase :int ):
super().__init__()
snake_case_ : str = nn.Linear(_UpperCamelCase ,dim_out * 2 )
def a__ ( self :Dict ,_UpperCamelCase :List[str] ):
if gate.device.type != "mps":
return F.gelu(_UpperCamelCase )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def a__ ( self :Optional[Any] ,_UpperCamelCase :Optional[int] ):
snake_case_ , snake_case_ : Dict = self.proj(_UpperCamelCase ).chunk(2 ,dim=-1 )
return hidden_states * self.gelu(_UpperCamelCase )
class __UpperCamelCase ( nn.Module ):
def __init__( self :List[str] ,_UpperCamelCase :int ,_UpperCamelCase :int ):
super().__init__()
snake_case_ : int = nn.Linear(_UpperCamelCase ,_UpperCamelCase )
def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[int] ):
snake_case_ : int = self.proj(_UpperCamelCase )
return x * torch.sigmoid(1.7_02 * x )
class __UpperCamelCase ( nn.Module ):
def __init__( self :int ,_UpperCamelCase :str ,_UpperCamelCase :List[Any] ):
super().__init__()
snake_case_ : int = nn.Embedding(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : Union[str, Any] = nn.SiLU()
snake_case_ : Any = nn.Linear(_UpperCamelCase ,embedding_dim * 2 )
snake_case_ : Dict = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase )
def a__ ( self :int ,_UpperCamelCase :List[str] ,_UpperCamelCase :int ):
snake_case_ : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase ) ) )
snake_case_ , snake_case_ : Tuple = torch.chunk(_UpperCamelCase ,2 )
snake_case_ : Tuple = self.norm(_UpperCamelCase ) * (1 + scale) + shift
return x
class __UpperCamelCase ( nn.Module ):
def __init__( self :List[str] ,_UpperCamelCase :Tuple ,_UpperCamelCase :int ):
super().__init__()
snake_case_ : int = CombinedTimestepLabelEmbeddings(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : int = nn.SiLU()
snake_case_ : List[str] = nn.Linear(_UpperCamelCase ,6 * embedding_dim ,bias=_UpperCamelCase )
snake_case_ : str = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ,eps=1E-6 )
def a__ ( self :Union[str, Any] ,_UpperCamelCase :Any ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :str=None ):
snake_case_ : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase ,_UpperCamelCase ,hidden_dtype=_UpperCamelCase ) ) )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = emb.chunk(6 ,dim=1 )
snake_case_ : str = self.norm(_UpperCamelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class __UpperCamelCase ( nn.Module ):
def __init__( self :Optional[int] ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :Optional[str] = None ,_UpperCamelCase :float = 1E-5 ):
super().__init__()
snake_case_ : Optional[int] = num_groups
snake_case_ : List[Any] = eps
if act_fn is None:
snake_case_ : int = None
else:
snake_case_ : Dict = get_activation(_UpperCamelCase )
snake_case_ : Optional[int] = nn.Linear(_UpperCamelCase ,out_dim * 2 )
def a__ ( self :List[Any] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :List[str] ):
if self.act:
snake_case_ : Any = self.act(_UpperCamelCase )
snake_case_ : Optional[int] = self.linear(_UpperCamelCase )
snake_case_ : Dict = emb[:, :, None, None]
snake_case_ , snake_case_ : str = emb.chunk(2 ,dim=1 )
snake_case_ : str = F.group_norm(_UpperCamelCase ,self.num_groups ,eps=self.eps )
snake_case_ : List[str] = x * (1 + scale) + shift
        return x
| 8 |
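# Standalone sketch of the chunked feed-forward trick used in the transformer
# block above: split the sequence dimension into chunks, run the MLP per chunk,
# and concatenate, trading scheduling overhead for lower peak activation memory.
# The MLP and tensor sizes here are made up.
import torch
import torch.nn as nn

ff = nn.Sequential(nn.Linear(16, 64), nn.GELU(), nn.Linear(64, 16))
hidden_states = torch.randn(2, 8, 16)  # (batch, sequence, dim)
chunk_size, chunk_dim = 4, 1
num_chunks = hidden_states.shape[chunk_dim] // chunk_size
out = torch.cat(
    [ff(chunk) for chunk in hidden_states.chunk(num_chunks, dim=chunk_dim)],
    dim=chunk_dim,
)
assert out.shape == hidden_states.shape  # same output as one unchunked pass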
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :str=True , lowerCamelCase_ :str="pt" ):
'''simple docstring'''
snake_case_ : Tuple = {"""add_prefix_space""": True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(""" """ ) else {}
snake_case_ : Union[str, Any] = padding_side
return tokenizer(
[line] , max_length=lowerCamelCase_ , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , )
def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Any=None , ):
'''simple docstring'''
snake_case_ : Dict = input_ids.ne(lowerCamelCase_ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
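# Inline illustration of the column trimming above (the values are made up, and
# pad_token_id is assumed to be 0 here): columns that are padding in every row of
# the batch are dropped before the forward pass.
_ids = torch.tensor([[5, 6, 0], [7, 0, 0]])
_keep = _ids.ne(0).any(dim=0)  # True for columns with at least one non-pad token
assert _ids[:, _keep].tolist() == [[5, 6], [7, 0]]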
class __UpperCamelCase ( lowercase__ ):
def __init__( self :List[Any] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :Any ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Any="train" ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :int=None ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :Optional[int]="" ,):
super().__init__()
snake_case_ : List[str] = Path(_UpperCamelCase ).joinpath(type_path + """.source""" )
snake_case_ : int = Path(_UpperCamelCase ).joinpath(type_path + """.target""" )
snake_case_ : Optional[int] = self.get_char_lens(self.src_file )
snake_case_ : List[str] = max_source_length
snake_case_ : str = max_target_length
assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
snake_case_ : str = tokenizer
snake_case_ : str = prefix
if n_obs is not None:
snake_case_ : int = self.src_lens[:n_obs]
snake_case_ : Tuple = src_lang
snake_case_ : str = tgt_lang
def __len__( self :Any ):
return len(self.src_lens )
def __getitem__( self :List[str] ,_UpperCamelCase :Union[str, Any] ):
snake_case_ : Optional[int] = index + 1 # linecache starts at 1
snake_case_ : Dict = self.prefix + linecache.getline(str(self.src_file ) ,_UpperCamelCase ).rstrip("""\n""" )
snake_case_ : List[Any] = linecache.getline(str(self.tgt_file ) ,_UpperCamelCase ).rstrip("""\n""" )
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_UpperCamelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
snake_case_ : int = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer
)
snake_case_ : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer
snake_case_ : Optional[Any] = encode_line(_UpperCamelCase ,_UpperCamelCase ,self.max_source_length ,"""right""" )
snake_case_ : Tuple = encode_line(_UpperCamelCase ,_UpperCamelCase ,self.max_target_length ,"""right""" )
snake_case_ : int = source_inputs["""input_ids"""].squeeze()
snake_case_ : str = target_inputs["""input_ids"""].squeeze()
snake_case_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def a__ ( _UpperCamelCase :str ):
return [len(_UpperCamelCase ) for x in Path(_UpperCamelCase ).open().readlines()]
def a__ ( self :Optional[int] ,_UpperCamelCase :List[str] ):
snake_case_ : Optional[Any] = torch.stack([x["""input_ids"""] for x in batch] )
snake_case_ : List[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
snake_case_ : Union[str, Any] = torch.stack([x["""decoder_input_ids"""] for x in batch] )
snake_case_ : Optional[Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_UpperCamelCase )
else self.tokenizer.pad_token_id
)
snake_case_ : Tuple = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_UpperCamelCase )
else self.tokenizer.pad_token_id
)
snake_case_ : Optional[int] = trim_batch(_UpperCamelCase ,_UpperCamelCase )
snake_case_ , snake_case_ : Dict = trim_batch(_UpperCamelCase ,_UpperCamelCase ,attention_mask=_UpperCamelCase )
snake_case_ : Optional[int] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__A : List[Any] = getLogger(__name__)
def UpperCAmelCase ( lowerCamelCase_ :List[List] ):
'''simple docstring'''
return list(itertools.chain.from_iterable(lowerCamelCase_ ) )
def UpperCAmelCase ( lowerCamelCase_ :str ):
'''simple docstring'''
snake_case_ : int = get_git_info()
save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , """git_log.json""" ) )
def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int]=4 , **lowerCamelCase_ :Union[str, Any] ):
'''simple docstring'''
with open(lowerCamelCase_ , """w""" ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ )
def UpperCAmelCase ( lowerCamelCase_ :List[Any] ):
'''simple docstring'''
with open(lowerCamelCase_ ) as f:
return json.load(lowerCamelCase_ )
def UpperCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[Any] = git.Repo(search_parent_directories=lowerCamelCase_ )
snake_case_ : List[str] = {
"""repo_id""": str(lowerCamelCase_ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def UpperCAmelCase ( lowerCamelCase_ :Callable , lowerCamelCase_ :Iterable ):
'''simple docstring'''
return list(map(lowerCamelCase_ , lowerCamelCase_ ) )
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int ):
'''simple docstring'''
with open(lowerCamelCase_ , """wb""" ) as f:
return pickle.dump(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase ( lowerCamelCase_ :Dict ):
'''simple docstring'''
def remove_articles(lowerCamelCase_ :str ):
return re.sub(R"""\b(a|an|the)\b""" , """ """ , lowerCamelCase_ )
def white_space_fix(lowerCamelCase_ :Optional[Any] ):
return " ".join(text.split() )
def remove_punc(lowerCamelCase_ :Tuple ):
snake_case_ : Union[str, Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCamelCase_ :Optional[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) )
def UpperCAmelCase ( lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[int] ):
'''simple docstring'''
snake_case_ : List[Any] = normalize_answer(lowerCamelCase_ ).split()
snake_case_ : Optional[int] = normalize_answer(lowerCamelCase_ ).split()
snake_case_ : List[Any] = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ )
snake_case_ : Optional[Any] = sum(common.values() )
if num_same == 0:
return 0
snake_case_ : Optional[Any] = 1.0 * num_same / len(lowerCamelCase_ )
snake_case_ : Union[str, Any] = 1.0 * num_same / len(lowerCamelCase_ )
snake_case_ : Optional[Any] = (2 * precision * recall) / (precision + recall)
return fa
def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] ):
'''simple docstring'''
return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ )
def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] ):
'''simple docstring'''
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
snake_case_ : Optional[int] = 0
for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ):
em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
em /= len(lowerCamelCase_ )
return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config | 8 | 1 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
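

# The line-parsing comprehension above is easy to misread, so here is a small
# hedged walk-through of just that rule (sample lines are illustrative):
#
#   "abc\n" -> ["abc"]      (no comma: the line is one single-entry group)
#   "a,b\n" -> ["a", "b"]   (comma-separated surface forms share one token id)
#   ","     -> [","]        (a line that is exactly "," is kept literally)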
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, emoji_file, unk_token="<|endoftext|>", pad_token="<|endoftext|>",
                 bos_token="<|startoftext|>", eos_token="<|endoftext|>", do_clean_text=False, **kwargs):
        super().__init__(unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token,
                         do_clean_text=do_clean_text, **kwargs)
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation"):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory, filename_prefix=None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace("　", "<SP>")
        text = text.replace(" ", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
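

# Hedged usage sketch: loading the published checkpoint named in the constants
# above exercises this tokenizer end to end (requires network access and an
# installed `transformers`; the exact token output depends on the downloaded
# vocabulary, so none is asserted here):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   print(tokenizer.tokenize("こんにちは、世界。"))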
| 104 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
lowerCamelCase__ = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
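

# Illustrative pytest invocation for just the fast checks above. The file
# path is an assumption about where this module would live in a diffusers
# checkout; `-k` expression selection itself is standard pytest:
#
#   pytest path/to/test_stable_diffusion_attend_and_excite.py -k "FastTests"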
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")
        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1 | 212 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6],
                 use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384,
                 auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False,
                 loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
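

# Minimal hedged usage sketch (only meaningful inside the transformers
# package, since this module uses relative imports):
#
#   config = UperNetConfig()                  # falls back to a ResNet backbone
#   print(config.backbone_config.model_type)  # -> "resnet"
#   print(config.to_dict()["model_type"])     # -> "upernet"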
| 334 |
'''simple docstring'''
from __future__ import annotations

from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time the process waits in the ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(
        self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int
    ) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
UpperCamelCase = Process('''P1''', 0, 53)
UpperCamelCase = Process('''P2''', 0, 17)
UpperCamelCase = Process('''P3''', 0, 68)
UpperCamelCase = Process('''P4''', 0, 24)
UpperCamelCase = 3
UpperCamelCase = [17, 25]
UpperCamelCase = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
UpperCamelCase = Process('''P1''', 0, 53)
UpperCamelCase = Process('''P2''', 0, 17)
UpperCamelCase = Process('''P3''', 0, 68)
UpperCamelCase = Process('''P4''', 0, 24)
UpperCamelCase = 3
UpperCamelCase = [17, 25]
UpperCamelCase = deque([Pa, Pa, Pa, Pa])
UpperCamelCase = MLFQ(number_of_queues, time_slices, queue, 0)
UpperCamelCase = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
f'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
f'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
| 334 | 1 |
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes numbers are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F"""{solution() = }""")
| 73 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
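

# Hedged usage sketch via the transformers tools API. It assumes `load_tool`
# resolves the task id "translation" to this tool, and it downloads the NLLB
# checkpoint on first use:
#
#   from transformers import load_tool
#   translator = load_tool("translation")
#   print(translator("Bonjour, le monde !", src_lang="French", tgt_lang="English"))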
| 32 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace",
                 bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>",
                 pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
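

# Hedged usage sketch (downloads vocab/merges on first call; exact token ids
# depend on the downloaded files, so none are asserted here):
#
#   from transformers import RobertaTokenizerFast
#   tok = RobertaTokenizerFast.from_pretrained("roberta-base")
#   enc = tok("Hello world")
#   print(enc["input_ids"], tok.convert_ids_to_tokens(enc["input_ids"]))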
| 358 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n    title={Measuring Mathematical Problem Solving With the MATH Dataset},\n    author={Dan Hendrycks\n    and Collin Burns\n    and Saurav Kadavath\n    and Akul Arora\n    and Steven Basart\n    and Eric Tang\n    and Dawn Song\n    and Jacob Steinhardt},\n    journal={arXiv preprint arXiv:2103.03874},\n    year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTex.\n    references: list of reference for each prediction. Each\n        reference is a string that contains natural language\n        and LaTex.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 93 | 0 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
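

# The ref file is expected to be JSON-lines with one entry per dataset row;
# each line decodes to the sub-token indices that belong to a whole word
# (illustrative contents, not taken from the source):
#
#   [2, 3]
#   [1, 4, 5]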
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir , '''train_results.txt''' )
        if trainer.is_world_process_zero():
            with open(output_train_file , '''w''' ) as writer:
logger.info('''***** Train results *****''' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f' {key} = {value}' )
writer.write(f'{key} = {value}\n' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output['''eval_loss'''] )
        results['''perplexity'''] = perplexity

        output_eval_file = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in sorted(results.items() ):
logger.info(f' {key} = {value}' )
writer.write(f'{key} = {value}\n' )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
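
# A minimal sketch of what the whole-word-masking collator above produces,
# assuming a WordPiece tokenizer; the '''bert-base-uncased''' checkpoint is an
# illustrative choice, not something the script itself prescribes.
def _wwm_collator_demo():
    from transformers import AutoTokenizer, DataCollatorForWholeWordMask

    tok = AutoTokenizer.from_pretrained('''bert-base-uncased''' )
    collator = DataCollatorForWholeWordMask(tokenizer=tok , mlm_probability=0.15 )
    batch = collator([tok('''whole word masking masks every piece of a word''' )] )
    # ``labels`` is -100 everywhere except at the positions of the masked words.
    return batch['''input_ids'''] , batch['''labels''']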
| 49 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self ) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()})
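
# A small usage sketch, assuming the class above is datasets' DownloadConfig
# (the field layout and the deepcopy-based ``copy`` match it).
def _download_config_demo():
    cfg = DownloadConfig(cache_dir='''/tmp/datasets_cache''' , max_retries=3 )
    clone = cfg.copy()
    clone.max_retries = 5
    # ``copy`` deep-copies every field, so the original is left untouched.
    assert cfg.max_retries == 3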
| 49 | 1 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x , **y ):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger('transformers-cli/serving')
def serve_command_factory(args ):
    """Factory used to instantiate the serving command from the provided CLI arguments."""
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    return ServeCommand(nlp , args.host , args.port , args.workers )
class ServeModelInfoResult(BaseModel ):
    """Expose model information"""

    infos: dict


class ServeTokenizeResult(BaseModel ):
    """Tokenize result model"""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel ):
    """DeTokenize result model"""

    text: str


class ServeForwardResult(BaseModel ):
    """Forward result model"""

    output: Any


class ServeCommand(BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand(parser ):
        serve_parser = parser.add_parser(
            """serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" )
        serve_parser.add_argument(
            """--task""" , type=str , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , )
        serve_parser.add_argument("""--host""" , type=str , default="""localhost""" , help="""Interface the server will listen on.""" )
        serve_parser.add_argument("""--port""" , type=int , default=8888 , help="""Port the serving will listen to.""" )
        serve_parser.add_argument("""--workers""" , type=int , default=1 , help="""Number of http workers""" )
        serve_parser.add_argument("""--model""" , type=str , help="""Model's name or path to stored model.""" )
        serve_parser.add_argument("""--config""" , type=str , help="""Model's config name or path to stored model.""" )
        serve_parser.add_argument("""--tokenizer""" , type=str , help="""Tokenizer name to use.""" )
        serve_parser.add_argument(
            """--device""" , type=int , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
        serve_parser.set_defaults(func=serve_command_factory )
    def __init__(self , pipeline: Pipeline , host: str , port: int , workers: int ):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                """Using serve command requires FastAPI and uvicorn. """
                """Please install transformers with [serving]: pip install \"transformers[serving]\"."""
                """Or install FastAPI and uvicorn separately.""" )
        else:
            logger.info(F"""Serving model over {host}:{port}""" )
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        """/""" , self.model_info , response_model=ServeModelInfoResult , response_class=JSONResponse , methods=["""GET"""] , ),
                    APIRoute(
                        """/tokenize""" , self.tokenize , response_model=ServeTokenizeResult , response_class=JSONResponse , methods=["""POST"""] , ),
                    APIRoute(
                        """/detokenize""" , self.detokenize , response_model=ServeDeTokenizeResult , response_class=JSONResponse , methods=["""POST"""] , ),
                    APIRoute(
                        """/forward""" , self.forward , response_model=ServeForwardResult , response_class=JSONResponse , methods=["""POST"""] , ),
                ] , timeout=600 , )
    def run(self ):
        run(self._app , host=self.host , port=self.port , workers=self.workers )

    def model_info(self ):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
    def tokenize(self , text_input: str = Body(None , embed=True ) , return_ids: bool = Body(False , embed=True ) ):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input )

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt )
                return ServeTokenizeResult(tokens=tokens_txt , tokens_ids=tokens_ids )
            else:
                return ServeTokenizeResult(tokens=tokens_txt )

        except Exception as e:
            raise HTTPException(status_code=500 , detail={"""model""": """""", """error""": str(e )} )
    def detokenize(self , tokens_ids: List[int] = Body(None , embed=True ) , skip_special_tokens: bool = Body(False , embed=True ) , cleanup_tokenization_spaces: bool = Body(True , embed=True ) , ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids , skip_special_tokens , cleanup_tokenization_spaces )
            return ServeDeTokenizeResult(model="""""" , text=decoded_str )
        except Exception as e:
            raise HTTPException(status_code=500 , detail={"""model""": """""", """error""": str(e )} )
    async def forward(self , inputs=Body(None , embed=True ) ):
        # Check we don't have an empty string
        if len(inputs ) == 0:
            return ServeForwardResult(output=[] , attention=[] )

        try:
            # Forward through the model
            output = self._pipeline(inputs )
            return ServeForwardResult(output=output )
        except Exception as e:
            raise HTTPException(500 , {"""error""": str(e )} )
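
# A hedged client-side sketch for the command above. The route paths come from
# the APIRoute list; the JSON field names ('''text_input''', '''return_ids''') follow
# from the ``Body(..., embed=True)`` parameters and are an assumption.
def _serve_client_demo(host="""localhost""" , port=8888 ):
    import requests

    resp = requests.post(
        F"""http://{host}:{port}/tokenize""" , json={"""text_input""": """Hello world""", """return_ids""": True} , )
    # expected shape: {"tokens": [...], "tokens_ids": [...]}
    return resp.json()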
| 371 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCAmelCase ( metaclass=DummyObject ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Tuple , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Dict ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[int] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Any ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[str] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : str ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=DummyObject ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : List[str] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : int ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : str ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[str] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : List[Any] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=DummyObject ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Union[str, Any] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : int ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Any , *_lowerCAmelCase : str , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : List[Any] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=DummyObject ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : List[str] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Any ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[int] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Dict ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Union[str, Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : List[str] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=DummyObject ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Union[str, Any] , *_lowerCAmelCase : Any , **_lowerCAmelCase : str ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[Any] , *_lowerCAmelCase : int , **_lowerCAmelCase : Any ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Dict , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : int ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCAmelCase ( metaclass=DummyObject ):
'''simple docstring'''
__lowerCAmelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[int] ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Any ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def A (cls : Optional[Any] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
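
# A brief sketch of the dummy-object pattern above, assuming the standard
# ``DummyObject``/``requires_backends`` behavior: instantiating a dummy (or
# calling a classmethod on it) raises an ImportError naming the missing
# backends instead of failing with an obscure ModuleNotFoundError.
def _dummy_object_demo(dummy_cls ):
    try:
        dummy_cls()
    except ImportError as err:
        # the message lists '''torch''', '''transformers''' and '''onnx'''
        return str(err )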
| 337 | 0 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_2_8,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 5_0,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 1_0,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 1_0,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class __A( unittest.TestCase ):
    @classmethod
    def setUpClass(cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
    @classmethod
    def tearDownClass(cls ):
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
    def test_push_to_hub(self ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub('''test-config''' , use_auth_token=self._token )

        new_config = BertConfig.from_pretrained(F"""{USER}/test-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )

        # Reset repo
        delete_repo(token=self._token , repo_id='''test-config''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , repo_id='''test-config''' , push_to_hub=True , use_auth_token=self._token )

        new_config = BertConfig.from_pretrained(F"""{USER}/test-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
    def test_push_to_hub_in_organization(self ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )

        new_config = BertConfig.from_pretrained('''valid_org/test-config-org''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )

        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='''valid_org/test-config-org''' , push_to_hub=True , use_auth_token=self._token )

        new_config = BertConfig.from_pretrained('''valid_org/test-config-org''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
    def test_push_to_hub_dynamic_config(self ):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42 )

        config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )

        new_config = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=True )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
        self.assertEqual(new_config.attribute , 42 )
class __A( unittest.TestCase ):
    def test_config_from_string(self ):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + '''foo'''  # str
        c.update_from_string(
            F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
        self.assertEqual(n_embd , c.n_embd , '''mismatch for key: n_embd''' )
        self.assertEqual(resid_pdrop , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
        self.assertEqual(scale_attn_weights , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
        self.assertEqual(summary_type , c.summary_type , '''mismatch for key: summary_type''' )
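
    # For reference, a hedged sketch of the API under test: with a stock
    # GPT2Config, ``c.update_from_string('''n_embd=1024,scale_attn_weights=false''')``
    # casts each value to the type of the existing attribute, so afterwards
    # ``c.n_embd == 1024`` and ``c.scale_attn_weights is False``.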
    def test_config_common_kwargs_is_complete(self ):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config , key )]
        if len(keys_with_defaults ) > 0:
            raise ValueError(
                '''The following keys are set with the default values in'''
                ''' `test_configuration_common.config_common_kwargs`, pick another value for them:'''
                F""" {', '.join(keys_with_defaults )}.""" )
    def test_from_pretrained_subfolder(self ):
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )

        config = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )

        self.assertIsNotNone(config )
    def test_cached_files_are_used_when_internet_is_down(self ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
            _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self ):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            '''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
    def test_local_versioning(self ):
        configuration = AutoConfig.from_pretrained('''bert-base-cased''' )
        configuration.configuration_files = ['''config.4.0.0.json''']

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir )
            configuration.hidden_size = 2
            json.dump(configuration.to_dict() , open(os.path.join(tmp_dir , '''config.4.0.0.json''' ) , '''w''' ) )

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size , 2 )

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['''config.42.0.0.json''']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir )
            shutil.move(os.path.join(tmp_dir , '''config.4.0.0.json''' ) , os.path.join(tmp_dir , '''config.42.0.0.json''' ) )
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size , 768 )
    def test_repo_versioning_before(self ):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = '''hf-internal-testing/test-two-configs'''

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = '''v4.0.0'''
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo , return_unused_kwargs=True )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs , {} )

        # Testing an older version by monkey-patching the version in the module where it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = '''v3.0.0'''
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo )
self.assertEqual(old_configuration.hidden_size , 768 ) | 6 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int ) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(limit: int = 1000 ) -> int:
    triplets = pythagorean_triple(limit )
    return triplets.most_common(1 )[0][0]
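

# Sanity check from the Project Euler 39 statement: p = 120 has exactly three
# integer right triangles, (20, 48, 52), (24, 45, 51) and (30, 40, 50).
assert pythagorean_triple(120 )[120] == 3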
if __name__ == "__main__":
print(F"Perimeter {solution()} has maximum solutions") | 6 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name ):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = '''huggingface/label-files'''
    filename = '''ade20k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim , depths=depths , num_heads=num_heads , window_size=window_size , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )

    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )

    return config
def create_rename_keys(config ):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val


def read_in_q_k_v(state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" )
            in_proj_bias = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[:dim]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[dim : dim * 2, :]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[dim : dim * 2]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[-dim:, :]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x ):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x


def reverse_correct_unfold_reduction_order(x ):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel , in_channel // 4 , 4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x


def correct_unfold_norm_order(x ):
    in_channel = x.shape[0]
    x = x.reshape(4 , in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
    return x


def reverse_correct_unfold_norm_order(x ):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 , 4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
    return x
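

# A note on the [0, 2, 1, 3] shuffles above (a hedged reading, since the script
# itself does not explain them): the original Swin checkpoint builds its
# patch-merging input with nn.Unfold, which interleaves the four 2x2 neighbours
# in a different order than the HF implementation's concatenation. Regrouping
# the channels in blocks of four converts one layout into the other, and the
# reverse_* variants undo that permutation when mapping the mmseg weights onto
# the HF layout.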
def convert_upernet_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub ):
    model_name_to_url = {
        '''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
        '''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
        '''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
        '''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' , file_name=model_name )[
        '''state_dict'''
    ]

    for name, param in state_dict.items():
        print(name , param.shape )

    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace('''bn''' , '''batch_norm''' )
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )

    read_in_q_k_v(state_dict , config.backbone_config )

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value )
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value )

    model.load_state_dict(state_dict )

    # verify on image
    url = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )

    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors='''pt''' ).pixel_values

    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits

    print(logits.shape )
    print('''First values of logits:''' , logits[0, 0, :3, :3] )
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
    print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1E-4 )
    print('''Looks ok!''' )

    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        print(F"""Pushing model and processor for {model_name} to hub""" )
        model.push_to_hub(F"""openmmlab/{model_name}""" )
        processor.push_to_hub(F"""openmmlab/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[f'''upernet-swin-{size}''' for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 366 |
from ..utils import DummyObject, requires_backends
class lowercase_ ( metaclass=DummyObject ):
UpperCamelCase_ : Optional[int] = ["speech"]
def __init__( self : str , *A__ : List[str] , **A__ : Tuple ) -> Optional[Any]:
requires_backends(self , ['''speech'''] )
class lowercase_ ( metaclass=DummyObject ):
UpperCamelCase_ : Optional[Any] = ["speech"]
def __init__( self : Dict , *A__ : int , **A__ : int ) -> Tuple:
requires_backends(self , ['''speech'''] )
| 278 | 0 |
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list , n: int ):
    # Checks if the entire collection has been sorted
    if len(collection ) <= 1 or n <= 1:
        return

    insert_next(collection , n - 1 )
    rec_insertion_sort(collection , n - 1 )


def insert_next(collection: list , index: int ):
    # Checks the order between the adjacent elements
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection , index + 1 )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = input('Enter integers separated by spaces: ')
_lowerCamelCase : list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
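
# Quick self-check for the recursive sort above (it sorts in place, ascending).
_sample = [5, 3, 1, 4, 2]
rec_insertion_sort(_sample , len(_sample ) )
assert _sample == [1, 2, 3, 4, 5]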
| 258 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
    def setUp(self ):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , """w""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_input_output_texts(self , tokenizer ):
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = BioGptTokenizer(self.vocab_file , self.merges_file )

        text = """lower"""
        bpe_tokens = ["""low""", """er</w>"""]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )

        input_tokens = tokens + ["""<unk>"""]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
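
    # How the toy vocab/merges tokenize '''lower''' (standard BPE, with merge
    # priority given by file order): l o w e r</w> -> lo -> low, and e r</w> ->
    # er</w>, i.e. ['''low''', '''er</w>'''] with ids [14, 15]; '''<unk>''' is id 20,
    # which is where the expected [14, 15, 20] above comes from.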
    @slow
    def test_sequence_builders(self ):
        tokenizer = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )

        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 258 | 1 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=64 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , q_groups=2 , k_groups=2 , v_groups=2 , post_attention_groups=2 , intermediate_groups=4 , output_groups=1 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_squeezebert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_squeezebert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_squeezebert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_squeezebert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_squeezebert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self ):
        self.model_tester = SqueezeBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SqueezeBertConfig , dim=37 )

    def test_config(self ):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs )

    def test_for_masked_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs )

    def test_for_question_answering(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs )

    def test_for_multiple_choice(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self ):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_sentence_classification(self ):
        model = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )

        input_ids = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 3) )
        self.assertEqual(output.shape , expected_shape )
        expected_tensor = torch.tensor([[0.64_01, -0.03_49, -0.60_41]] )
        self.assertTrue(torch.allclose(output , expected_tensor , atol=1E-4 ) )
| 131 |
def solution(limit: int = 1000000 ) -> int:
    limit = limit + 1
    frequency = [0] * limit

    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a

    count = sum(1 for x in frequency[1:limit] if x == 10 )
    return count
if __name__ == "__main__":
print(f'{solution() = }')
| 131 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Optional[int] = logging.get_logger(__name__)
A__ : Tuple = {
"vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig ):
    model_type = '''glpn'''

    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=64 , max_depth=10 , head_in_index=-1 , **kwargs , ):
        super().__init__(**kwargs )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
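

# A minimal usage sketch; GLPNConfig is the class defined above and the values
# checked are simply its defaults.
def _glpn_config_demo():
    config = GLPNConfig()
    # four encoder stages with growing widths
    assert config.num_encoder_blocks == 4
    assert config.hidden_sizes == [32, 64, 160, 256]
    return config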
| 103 |
import math
def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001 ) -> int:
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError("""Parameter nth must be int or castable to int.""" ) from None
    if nth <= 0:
        raise ValueError("""Parameter nth must be greater than or equal to one.""" )
    primes: list[int] = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
if __name__ == "__main__":
print(f'{solution() = }')
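    # Known-answer check (Project Euler problem 7): the 10001st prime is 104743.
    assert solution() == 104743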
| 111 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline ):
'''simple docstring'''
    def __init__( self , vqvae: VQModel , unet: UNet2DModel , scheduler: DDIMScheduler ):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
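
    # Usage sketch (hedged): with diffusers' unconditional latent-diffusion
    # weights, for example the '''CompVis/ldm-celebahq-256''' checkpoint,
    # ``pipe = LDMPipeline.from_pretrained(...)`` followed by
    # ``pipe(num_inference_steps=50).images[0]`` yields a single PIL image.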
@torch.no_grad()
    def __call__( self , batch_size: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta: float = 0.0 , num_inference_steps: int = 50 , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        latents = latents.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps )

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps ):
            latent_model_input = self.scheduler.scale_model_input(latents , t )
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction , t , latents , **extra_kwargs ).prev_sample

        # decode the image latents with the VQ-VAE
        image = self.vqvae.decode(latents ).sample

        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image ) | 256 |
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable ):
    def __init__(self , *args , **kwargs ):
        super().__init__(*args , **kwargs )

    def _set_value(self , key , data ):
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        self._keys[key] = self.values[key]

    def balanced_factor(self ):
        return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self , key , data=None ):
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key , data ) | 256 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : Optional[int] = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig ):
    model_type = "vivit"

    def __init__( self , image_size=224 , num_frames=32 , tubelet_size=[2, 16, 16] , num_channels=3 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , qkv_bias=True , **kwargs , ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
| 52 |
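# Hedged usage note for the config above: in transformers the public class is
# VivitConfig, and the keyword names below mirror the __init__ assignments.
# Treat the import as an assumption if your installed version predates ViViT.
from transformers import VivitConfig

config = VivitConfig(image_size=224, num_frames=32, tubelet_size=[2, 16, 16])
print(config.hidden_size)  # 768 by default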
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
A__: int = logging.getLogger(__name__)
@dataclass
class A__ :
__UpperCamelCase : str
__UpperCamelCase : List[str]
__UpperCamelCase : Optional[List[str]]
@dataclass
class A__ :
__UpperCamelCase : List[int]
__UpperCamelCase : List[int]
__UpperCamelCase : Optional[List[int]] = None
__UpperCamelCase : Optional[List[int]] = None
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : str = "train"
__UpperCamelCase : Tuple = "dev"
__UpperCamelCase : str = "test"
class A__ :
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Union[Split, str] ) -> List[InputExample]:
'''simple docstring'''
raise NotImplementedError
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
raise NotImplementedError
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :List[InputExample] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :str=False , SCREAMING_SNAKE_CASE :Optional[Any]="[CLS]" , SCREAMING_SNAKE_CASE :Optional[int]=1 , SCREAMING_SNAKE_CASE :Any="[SEP]" , SCREAMING_SNAKE_CASE :List[Any]=False , SCREAMING_SNAKE_CASE :Union[str, Any]=False , SCREAMING_SNAKE_CASE :List[str]=0 , SCREAMING_SNAKE_CASE :str=0 , SCREAMING_SNAKE_CASE :Dict=-1_0_0 , SCREAMING_SNAKE_CASE :Optional[int]=0 , SCREAMING_SNAKE_CASE :Tuple=True , ) -> List[InputFeatures]:
'''simple docstring'''
_a : str ={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE )}
_a : Tuple =[]
for ex_index, example in enumerate(SCREAMING_SNAKE_CASE ):
if ex_index % 1_0_0_0_0 == 0:
logger.info("""Writing example %d of %d""" , SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
_a : Optional[Any] =[]
_a : List[Any] =[]
for word, label in zip(example.words , example.labels ):
_a : Optional[int] =tokenizer.tokenize(SCREAMING_SNAKE_CASE )
# bert-base-multilingual-cased sometimes outputs "nothing" ([]) when tokenizing just a space.
if len(SCREAMING_SNAKE_CASE ) > 0:
tokens.extend(SCREAMING_SNAKE_CASE )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(SCREAMING_SNAKE_CASE ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
_a : Optional[int] =tokenizer.num_special_tokens_to_add()
if len(SCREAMING_SNAKE_CASE ) > max_seq_length - special_tokens_count:
_a : List[Any] =tokens[: (max_seq_length - special_tokens_count)]
_a : Tuple =label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
_a : Dict =[sequence_a_segment_id] * len(SCREAMING_SNAKE_CASE )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
_a : Any =[cls_token] + tokens
_a : Dict =[pad_token_label_id] + label_ids
_a : Union[str, Any] =[cls_token_segment_id] + segment_ids
_a : List[str] =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
_a : Optional[int] =[1 if mask_padding_with_zero else 0] * len(SCREAMING_SNAKE_CASE )
# Zero-pad up to the sequence length.
_a : Union[str, Any] =max_seq_length - len(SCREAMING_SNAKE_CASE )
if pad_on_left:
_a : Optional[Any] =([pad_token] * padding_length) + input_ids
_a : Optional[int] =([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
_a : Union[str, Any] =([pad_token_segment_id] * padding_length) + segment_ids
_a : Dict =([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
if ex_index < 5:
logger.info("""*** Example ***""" )
logger.info("""guid: %s""" , example.guid )
logger.info("""tokens: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in tokens] ) )
logger.info("""input_ids: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in input_ids] ) )
logger.info("""input_mask: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in input_mask] ) )
logger.info("""segment_ids: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in segment_ids] ) )
logger.info("""label_ids: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
_a : Tuple =None
features.append(
InputFeatures(
input_ids=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , label_ids=SCREAMING_SNAKE_CASE ) )
return features
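# Aside: a minimal illustration of the label-alignment rule implemented above.
# The first sub-token of a word keeps the real label id; the remaining word
# pieces get the ignore index (-100), which PyTorch's CrossEntropyLoss skips.
pad_token_label_id = -100
word_pieces = ["jack", "##son", "##ville"]
label_id = 3  # e.g. the id of B-LOC (illustrative)
label_ids = [label_id] + [pad_token_label_id] * (len(word_pieces) - 1)
print(label_ids)  # [3, -100, -100]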
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : List[InputFeatures]
__UpperCamelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__( self :Dict , SCREAMING_SNAKE_CASE :TokenClassificationTask , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[int] = None , SCREAMING_SNAKE_CASE :int=False , SCREAMING_SNAKE_CASE :Split = Split.train , ) -> List[str]:
'''simple docstring'''
# Load data features from cache or dataset file
_a : Optional[Any] =os.path.join(
SCREAMING_SNAKE_CASE , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(SCREAMING_SNAKE_CASE ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_a : List[str] =cached_features_file + """.lock"""
with FileLock(SCREAMING_SNAKE_CASE ):
if os.path.exists(SCREAMING_SNAKE_CASE ) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}" )
_a : Any =torch.load(SCREAMING_SNAKE_CASE )
else:
logger.info(f"Creating features from dataset file at {data_dir}" )
_a : Any =token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# TODO clean up all this to leverage built-in features of tokenizers
_a : List[str] =token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"Saving features into cached file {cached_features_file}" )
torch.save(self.features , SCREAMING_SNAKE_CASE )
def __len__( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.features )
def __getitem__( self :Dict , SCREAMING_SNAKE_CASE :int ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
if is_tf_available():
import tensorflow as tf
class A__ :
__UpperCamelCase : List[InputFeatures]
__UpperCamelCase : int = -100
def __init__( self :str , SCREAMING_SNAKE_CASE :TokenClassificationTask , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[int] = None , SCREAMING_SNAKE_CASE :str=False , SCREAMING_SNAKE_CASE :Split = Split.train , ) -> Any:
'''simple docstring'''
_a : Tuple =token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# TODO clean up all this to leverage built-in features of tokenizers
_a : List[Any] =token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
_a : Union[str, Any] =tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
_a : Union[str, Any] =tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def __UpperCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
_a : List[Any] =self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self :str ) -> Optional[int]:
'''simple docstring'''
return len(self.features )
def __getitem__( self :int , SCREAMING_SNAKE_CASE :str ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
| 276 | 0 |
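# Runnable toy version of the tf.data.Dataset.from_generator pattern used in
# the TF branch above (requires tensorflow; "tf.intaa" in the obfuscated code
# stands for tf.int32).
import tensorflow as tf

def gen():
    yield {"input_ids": [1, 2, 3], "attention_mask": [1, 1, 1]}, [0, 1, 0]

ds = tf.data.Dataset.from_generator(
    gen,
    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int32),
    (
        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
        tf.TensorShape([None]),
    ),
)
for features, labels in ds.take(1):
    print(features["input_ids"].numpy())  # [1 2 3]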
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
UpperCAmelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def __init__( self : Any , lowerCAmelCase_ : CLIPSegForImageSegmentation , lowerCAmelCase_ : CLIPSegProcessor , lowerCAmelCase_ : AutoencoderKL , lowerCAmelCase_ : CLIPTextModel , lowerCAmelCase_ : CLIPTokenizer , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase_ : StableDiffusionSafetyChecker , lowerCAmelCase_ : CLIPImageProcessor , ):
"""simple docstring"""
super().__init__()
if hasattr(scheduler.config , """steps_offset""") and scheduler.config.steps_offset != 1:
lowercase_ = (
F'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
F''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
"""to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
""" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
""" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
""" file"""
)
deprecate("""steps_offset!=1""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_)
lowercase_ = dict(scheduler.config)
lowercase_ = 1
lowercase_ = FrozenDict(lowerCAmelCase_)
if hasattr(scheduler.config , """skip_prk_steps""") and scheduler.config.skip_prk_steps is False:
lowercase_ = (
F'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
""" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
""" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
""" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
""" Hub, it would be very nice if you could open a Pull request for the"""
""" `scheduler/scheduler_config.json` file"""
)
deprecate("""skip_prk_steps not set""" , """1.0.0""" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_)
lowercase_ = dict(scheduler.config)
lowercase_ = True
lowercase_ = FrozenDict(lowerCAmelCase_)
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""")
self.register_modules(
segmentation_model=lowerCAmelCase_ , segmentation_processor=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , )
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Optional[Union[str, int]] = "auto"):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
self.enable_attention_slicing(lowerCAmelCase_)
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""")
lowercase_ = torch.device("""cuda""")
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase_ , lowerCAmelCase_)
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _UpperCAmelCase ( self : int):
"""simple docstring"""
if self.device != torch.device("""meta""") or not hasattr(self.unet , """_hf_hook"""):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCAmelCase_ , """_hf_hook""")
and hasattr(module._hf_hook , """execution_device""")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
def __call__( self : Any , lowerCAmelCase_ : Union[str, List[str]] , lowerCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] , lowerCAmelCase_ : str , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 5_0 , lowerCAmelCase_ : float = 7.5 , lowerCAmelCase_ : Optional[Union[str, List[str]]] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[torch.FloatTensor] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase_ : int = 1 , **lowerCAmelCase_ : Tuple , ):
"""simple docstring"""
lowercase_ = self.segmentation_processor(
text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""").to(self.device)
lowercase_ = self.segmentation_model(**lowerCAmelCase_)
lowercase_ = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
lowercase_ = self.numpy_to_pil(lowerCAmelCase_)[0].resize(image.size)
# Run inpainting pipeline with the generated mask
lowercase_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , )
| 313 |
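# Hedged sketch of the logits-to-mask step in __call__ above: sigmoid the
# segmentation logits, then turn the probability map into a grayscale PIL mask.
# Shapes are illustrative, not CLIPSeg's actual output shape.
import torch
from PIL import Image

logits = torch.randn(1, 352, 352)             # stand-in for segmentation logits
probs = torch.sigmoid(logits)[0].cpu().numpy()
mask_image = Image.fromarray((probs * 255).astype("uint8"))
print(mask_image.size)  # (352, 352)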
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    if (
        (cp >= 0x4e00 and cp <= 0x9fff)
        or (cp >= 0x3400 and cp <= 0x4dbf)  #
        or (cp >= 0x20000 and cp <= 0x2a6df)  #
        or (cp >= 0x2a700 and cp <= 0x2b73f)  #
        or (cp >= 0x2b740 and cp <= 0x2b81f)  #
        or (cp >= 0x2b820 and cp <= 0x2ceaf)  #
        or (cp >= 0xf900 and cp <= 0xfaff)
        or (cp >= 0x2f800 and cp <= 0x2fa1f)  #
    ):  #
        return True
    return False


def is_chinese(word):
    # a word counts as Chinese only if every character is a CJK codepoint
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    # collect multi-character tokens whose characters are all Chinese
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    # greedily match the longest known Chinese word at each position and mark
    # its non-initial characters with the "##" sub-word prefix
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match = min(end - start, max_word_len)
            for i in range(max_match, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save positions of Chinese sub-words that start with "##",
        # which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save the Chinese token's position
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
args = parser.parse_args()
main(args)
| 313 | 1 |
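# Self-contained illustration of the ref-id extraction above, simplified: the
# real code also checks that the stripped token is a single CJK character.
tokens = ["我", "##喜", "##欢", "你"]
ref_ids = [i for i, tok in enumerate(tokens) if tok.startswith("##")]
print(ref_ids)  # [1, 2]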
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : str=0.0 , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : str = "geglu" , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = True , _UpperCamelCase : str = "layer_norm" , _UpperCamelCase : bool = False , ) ->str:
super().__init__()
snake_case_ = only_cross_attention
snake_case_ = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
snake_case_ = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
snake_case_ = AdaLayerNorm(_UpperCamelCase , _UpperCamelCase )
elif self.use_ada_layer_norm_zero:
snake_case_ = AdaLayerNormZero(_UpperCamelCase , _UpperCamelCase )
else:
snake_case_ = nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase )
snake_case_ = Attention(
query_dim=_UpperCamelCase , heads=_UpperCamelCase , dim_head=_UpperCamelCase , dropout=_UpperCamelCase , bias=_UpperCamelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=_UpperCamelCase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
snake_case_ = (
AdaLayerNorm(_UpperCamelCase , _UpperCamelCase )
if self.use_ada_layer_norm
else nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase )
)
snake_case_ = Attention(
query_dim=_UpperCamelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=_UpperCamelCase , dim_head=_UpperCamelCase , dropout=_UpperCamelCase , bias=_UpperCamelCase , upcast_attention=_UpperCamelCase , ) # is self-attn if encoder_hidden_states is none
else:
snake_case_ = None
snake_case_ = None
# 3. Feed-forward
snake_case_ = nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase )
snake_case_ = FeedForward(_UpperCamelCase , dropout=_UpperCamelCase , activation_fn=_UpperCamelCase , final_dropout=_UpperCamelCase )
# let chunk size default to None
snake_case_ = None
snake_case_ = 0
def snake_case__( self : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : int ) ->Any:
# Sets chunk feed-forward
snake_case_ = chunk_size
snake_case_ = dim
def snake_case__( self : Optional[Any] , _UpperCamelCase : torch.FloatTensor , _UpperCamelCase : Optional[torch.FloatTensor] = None , _UpperCamelCase : Optional[torch.FloatTensor] = None , _UpperCamelCase : Optional[torch.FloatTensor] = None , _UpperCamelCase : Optional[torch.LongTensor] = None , _UpperCamelCase : Dict[str, Any] = None , _UpperCamelCase : Optional[torch.LongTensor] = None , ) ->Optional[Any]:
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
snake_case_ = self.norma(_UpperCamelCase , _UpperCamelCase )
elif self.use_ada_layer_norm_zero:
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ = self.norma(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hidden_dtype=hidden_states.dtype )
else:
snake_case_ = self.norma(_UpperCamelCase )
snake_case_ = cross_attention_kwargs if cross_attention_kwargs is not None else {}
snake_case_ = self.attna(
_UpperCamelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=_UpperCamelCase , **_UpperCamelCase , )
if self.use_ada_layer_norm_zero:
snake_case_ = gate_msa.unsqueeze(1 ) * attn_output
snake_case_ = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
snake_case_ = (
self.norma(_UpperCamelCase , _UpperCamelCase ) if self.use_ada_layer_norm else self.norma(_UpperCamelCase )
)
snake_case_ = self.attna(
_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , attention_mask=_UpperCamelCase , **_UpperCamelCase , )
snake_case_ = attn_output + hidden_states
# 3. Feed-forward
snake_case_ = self.norma(_UpperCamelCase )
if self.use_ada_layer_norm_zero:
snake_case_ = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
snake_case_ = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
snake_case_ = torch.cat(
[self.ff(_UpperCamelCase ) for hid_slice in norm_hidden_states.chunk(_UpperCamelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
snake_case_ = self.ff(_UpperCamelCase )
if self.use_ada_layer_norm_zero:
snake_case_ = gate_mlp.unsqueeze(1 ) * ff_output
snake_case_ = ff_output + hidden_states
return hidden_states
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : int = 4 , _UpperCamelCase : float = 0.0 , _UpperCamelCase : str = "geglu" , _UpperCamelCase : bool = False , ) ->List[str]:
super().__init__()
snake_case_ = int(dim * mult )
snake_case_ = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
snake_case_ = GELU(_UpperCamelCase , _UpperCamelCase )
if activation_fn == "gelu-approximate":
snake_case_ = GELU(_UpperCamelCase , _UpperCamelCase , approximate='''tanh''' )
elif activation_fn == "geglu":
snake_case_ = GEGLU(_UpperCamelCase , _UpperCamelCase )
elif activation_fn == "geglu-approximate":
snake_case_ = ApproximateGELU(_UpperCamelCase , _UpperCamelCase )
snake_case_ = nn.ModuleList([] )
# project in
self.net.append(_UpperCamelCase )
# project dropout
self.net.append(nn.Dropout(_UpperCamelCase ) )
# project out
self.net.append(nn.Linear(_UpperCamelCase , _UpperCamelCase ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(_UpperCamelCase ) )
def snake_case__( self : Optional[Any] , _UpperCamelCase : Union[str, Any] ) ->Tuple:
for module in self.net:
snake_case_ = module(_UpperCamelCase )
return hidden_states
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : str = "none" ) ->int:
super().__init__()
snake_case_ = nn.Linear(_UpperCamelCase , _UpperCamelCase )
snake_case_ = approximate
def snake_case__( self : Tuple , _UpperCamelCase : int ) ->Dict:
if gate.device.type != "mps":
return F.gelu(_UpperCamelCase , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def snake_case__( self : Any , _UpperCamelCase : List[str] ) ->List[Any]:
snake_case_ = self.proj(_UpperCamelCase )
snake_case_ = self.gelu(_UpperCamelCase )
return hidden_states
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : int , _UpperCamelCase : int ) ->Dict:
super().__init__()
snake_case_ = nn.Linear(_UpperCamelCase , dim_out * 2 )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : Dict ) ->Optional[int]:
if gate.device.type != "mps":
return F.gelu(_UpperCamelCase )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def snake_case__( self : Optional[Any] , _UpperCamelCase : Dict ) ->List[str]:
snake_case_, snake_case_ = self.proj(_UpperCamelCase ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(_UpperCamelCase )
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : int , _UpperCamelCase : int , _UpperCamelCase : int ) ->Union[str, Any]:
super().__init__()
snake_case_ = nn.Linear(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : str , _UpperCamelCase : Optional[int] ) ->int:
snake_case_ = self.proj(_UpperCamelCase )
return x * torch.sigmoid(1.702 * x )
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple ) ->Union[str, Any]:
super().__init__()
snake_case_ = nn.Embedding(_UpperCamelCase , _UpperCamelCase )
snake_case_ = nn.SiLU()
snake_case_ = nn.Linear(_UpperCamelCase , embedding_dim * 2 )
snake_case_ = nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase )
def snake_case__( self : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : List[str] ) ->Union[str, Any]:
snake_case_ = self.linear(self.silu(self.emb(_UpperCamelCase ) ) )
snake_case_, snake_case_ = torch.chunk(_UpperCamelCase , 2 )
snake_case_ = self.norm(_UpperCamelCase ) * (1 + scale) + shift
return x
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : int , _UpperCamelCase : int , _UpperCamelCase : Any ) ->str:
super().__init__()
snake_case_ = CombinedTimestepLabelEmbeddings(_UpperCamelCase , _UpperCamelCase )
snake_case_ = nn.SiLU()
snake_case_ = nn.Linear(_UpperCamelCase , 6 * embedding_dim , bias=_UpperCamelCase )
snake_case_ = nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase , eps=1e-6 )
def snake_case__( self : Tuple , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any]=None ) ->Optional[Any]:
snake_case_ = self.linear(self.silu(self.emb(_UpperCamelCase , _UpperCamelCase , hidden_dtype=_UpperCamelCase ) ) )
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ = emb.chunk(6 , dim=1 )
snake_case_ = self.norm(_UpperCamelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : float = 1e-5 ) ->List[str]:
super().__init__()
snake_case_ = num_groups
snake_case_ = eps
if act_fn is None:
snake_case_ = None
else:
snake_case_ = get_activation(_UpperCamelCase )
snake_case_ = nn.Linear(_UpperCamelCase , out_dim * 2 )
def snake_case__( self : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Any ) ->Any:
if self.act:
snake_case_ = self.act(_UpperCamelCase )
snake_case_ = self.linear(_UpperCamelCase )
snake_case_ = emb[:, :, None, None]
snake_case_, snake_case_ = emb.chunk(2 , dim=1 )
snake_case_ = F.group_norm(_UpperCamelCase , self.num_groups , eps=self.eps )
snake_case_ = x * (1 + scale) + shift
return x | 8 |
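# Hedged sketch of the chunked feed-forward trick in the transformer block
# above: split along one dimension, apply the MLP per chunk, concatenate. The
# result matches the unchunked pass; only peak activation memory changes.
import torch

ff = torch.nn.Linear(16, 16)
x = torch.randn(2, 8, 16)
chunk_size, dim = 4, 1
num_chunks = x.shape[dim] // chunk_size
y = torch.cat([ff(c) for c in x.chunk(num_chunks, dim=dim)], dim=dim)
assert torch.allclose(y, ff(x), atol=1e-6)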
def permute(nums):
    # pop each element in turn, permute the remainder, then append the popped
    # element to every sub-permutation
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result
def permutea(nums):
    # in-place backtracking: swap each candidate into position `start`,
    # recurse, then swap back
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack
    output = []
    backtrack(0)
    return output
if __name__ == "__main__":
    import doctest

    # use res to print the data from the permutea function
    res = permutea([1, 2, 3])
    print(res)
    doctest.testmod() | 8 | 1 |
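# Sanity check for the two (bug-fixed) routines above: both should agree with
# itertools.permutations up to ordering.
from itertools import permutations as it_permutations

expected = sorted(list(p) for p in it_permutations([1, 2, 3]))
assert sorted(permute([1, 2, 3])) == expected
assert sorted(permutea([1, 2, 3])) == expected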
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase : List[str] = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 151 |
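# Hedged sketch of the lazy-module pattern above: attribute access triggers the
# real import. This is a tiny stand-in, not transformers' actual _LazyModule
# signature.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, item):  # called only when normal lookup fails
        for module_name, names in self._import_structure.items():
            if item in names:
                return getattr(importlib.import_module(module_name), item)
        raise AttributeError(item)

lazy = LazyModule("demo", {"math": ["sqrt"], "json": ["dumps"]})
print(lazy.sqrt(9.0))  # 3.0 -- math is imported only on first access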
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowercase : Optional[int] = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( __lowercase , unittest.TestCase):
'''simple docstring'''
_A = DebertaVaTokenizer
_A = DebertaVaTokenizerFast
_A = True
_A = True
def _lowerCamelCase ( self :int ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCamelCase : Any = DebertaVaTokenizer(a , unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self :Optional[int] , a :List[str] ) -> List[str]:
__UpperCamelCase : Any = "this is a test"
__UpperCamelCase : Optional[int] = "this is a test"
return input_text, output_text
def _lowerCamelCase ( self :str ) -> Any:
__UpperCamelCase : Optional[Any] = "<pad>"
__UpperCamelCase : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def _lowerCamelCase ( self :Union[str, Any] ) -> Tuple:
__UpperCamelCase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(a ) , 3_0_0_0_1 )
def _lowerCamelCase ( self :Union[str, Any] ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def _lowerCamelCase ( self :List[Any] ) -> str:
# fmt: off
__UpperCamelCase : int = " \tHeLLo!how \n Are yoU? "
__UpperCamelCase : Optional[int] = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
__UpperCamelCase : Dict = DebertaVaTokenizer(a , do_lower_case=a )
__UpperCamelCase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : List[Any] = DebertaVaTokenizerFast(a , do_lower_case=a )
__UpperCamelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def _lowerCamelCase ( self :Dict ) -> Optional[Any]:
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def _lowerCamelCase ( self :str ) -> Any:
pass
def _lowerCamelCase ( self :Tuple ) -> Dict:
# fmt: off
__UpperCamelCase : Optional[int] = "I was born in 92000, and this is falsé."
__UpperCamelCase : Optional[int] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__UpperCamelCase : Dict = DebertaVaTokenizer(a , split_by_punct=a )
__UpperCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : Optional[Any] = DebertaVaTokenizerFast(a , split_by_punct=a )
__UpperCamelCase : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :List[Any] ) -> str:
# fmt: off
__UpperCamelCase : Dict = "I was born in 92000, and this is falsé."
__UpperCamelCase : Any = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__UpperCamelCase : Any = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : Dict = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :Dict ) -> Any:
# fmt: off
__UpperCamelCase : Optional[int] = "I was born in 92000, and this is falsé."
__UpperCamelCase : Tuple = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__UpperCamelCase : Optional[int] = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : List[Any] = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :List[str] ) -> Tuple:
# fmt: off
__UpperCamelCase : Dict = "I was born in 92000, and this is falsé."
__UpperCamelCase : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__UpperCamelCase : List[str] = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : List[str] = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :Union[str, Any] ) -> Any:
# fmt: off
__UpperCamelCase : Optional[int] = " \tHeLLo!how \n Are yoU? "
__UpperCamelCase : str = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
__UpperCamelCase : int = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : Tuple = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a )
__UpperCamelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :int ) -> Any:
__UpperCamelCase : Tuple = self.get_tokenizer()
__UpperCamelCase : List[Any] = self.get_rust_tokenizer()
__UpperCamelCase : Dict = "I was born in 92000, and this is falsé."
__UpperCamelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) )
__UpperCamelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) )
self.assertListEqual(a , a )
__UpperCamelCase : str = tokenizer.encode(a , add_special_tokens=a )
__UpperCamelCase : Union[str, Any] = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__UpperCamelCase : Optional[int] = self.get_rust_tokenizer()
__UpperCamelCase : List[Any] = tokenizer.encode(a )
__UpperCamelCase : Union[str, Any] = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :List[Any] ) -> List[str]:
__UpperCamelCase : Optional[int] = "This is a test"
__UpperCamelCase : List[Any] = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
__UpperCamelCase : Tuple = ["▁", "T", "his", "▁is", "▁a", "▁test"]
__UpperCamelCase : Union[str, Any] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
__UpperCamelCase : Union[str, Any] = DebertaVaTokenizer(a , keep_accents=a )
__UpperCamelCase : int = DebertaVaTokenizerFast(a , keep_accents=a )
__UpperCamelCase : Tuple = tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__UpperCamelCase : List[str] = tokenizer.tokenize(a )
self.assertListEqual(a , a )
__UpperCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(a , a )
__UpperCamelCase : List[Any] = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__UpperCamelCase : Optional[Any] = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
__UpperCamelCase : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(a , a )
# fmt: off
__UpperCamelCase : Optional[int] = "I was born in 92000, and this is falsé."
__UpperCamelCase : int = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
__UpperCamelCase : Optional[int] = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
__UpperCamelCase : Union[str, Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__UpperCamelCase : List[str] = tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__UpperCamelCase : Dict = tokenizer.tokenize(a )
self.assertListEqual(a , a )
__UpperCamelCase : Optional[int] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(a , a )
__UpperCamelCase : Dict = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
__UpperCamelCase : int = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
__UpperCamelCase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(a , a )
def _lowerCamelCase ( self :Union[str, Any] ) -> str:
__UpperCamelCase : List[Any] = DebertaVaTokenizer(a )
__UpperCamelCase : Optional[int] = tokenizer.encode("sequence builders" )
__UpperCamelCase : Optional[int] = tokenizer.encode("multi-sequence build" )
__UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(a )
__UpperCamelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(a , a )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , a )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , a , )
@slow
def _lowerCamelCase ( self :Dict ) -> int:
# fmt: off
__UpperCamelCase : Dict = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , ) | 151 | 1 |
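# Side note on the expected tokens above: "▁" is SentencePiece's word-boundary
# marker, so detokenization is essentially a join-and-replace.
tokens = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000"]
print("".join(tokens).replace("▁", " ").strip())  # "i was born in 92000"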
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")  # must be named T so that Generic[T] below resolves
class GraphAdjacencyList(Generic[T]):
    """Adjacency list type graph, both directed and undirected."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(
        self, source_vertex: T, destination_vertex: T
    ) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex. Also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
    def __repr__(self) -> str:
        return pformat(self.adj_list)
| 37 |
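# Quick usage check, assuming the de-obfuscated names restored above:
graph = GraphAdjacencyList(directed=False)
graph.add_edge(0, 1)
graph.add_edge(1, 2)
print(graph)  # {0: [1], 1: [0, 2], 2: [1]}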
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314 | 0 |
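# Example of the word-order reversal above (the obfuscation stripped the
# docstring, so the expected behaviour is shown here instead):
print(" ".join("hello world again".split()[::-1]))  # "again world hello"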
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
lowerCamelCase = True
from torch.cuda.amp import autocast
lowerCamelCase = logging.getLogger(__name__)
@dataclass
class lowercase__ :
'''simple docstring'''
UpperCamelCase = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to log verbose messages or not.'''} , )
UpperCamelCase = field(
default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} )
UpperCamelCase = field(
default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} )
UpperCamelCase = field(
default=0.9_9_9_9_9_5 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} )
def configure_logger(model_args , training_args ):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    dataset_name: str = field(
        default=None , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_split_name: Optional[str] = field(
        default='''train''' , metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
        } , )
    validation_split_name: Optional[str] = field(
        default='''validation''' , metadata={
            '''help''': (
                '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
            )
        } , )
    speech_file_column: Optional[str] = field(
        default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. Defaults to \'file\''''} , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
    validation_split_percentage: Optional[int] = field(
        default=1 , metadata={
            '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
        } , )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} )
@dataclass
class DataCollatorForWavaVecaPretraining:
    '''simple docstring'''

    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
    def __call__( self : Optional[int] , features : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
        '''simple docstring'''
        batch = self.feature_extractor.pad(
            features , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] )
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to(
                torch.long )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["input_values"].device )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0] , device=batch["input_values"].device ), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=attention_mask , min_masks=2 , )
        return batch
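# A toy illustration (shapes assumed, not used by the collator) of the
# flip/cumsum/flip trick above: placing a single 1 at index `length - 1` and
# accumulating from the right yields True for every position up to that index.
def _attention_mask_sketch():
    mask = torch.zeros(1 , 6 , dtype=torch.long )
    mask[0, 3] = 1  # pretend the real output length is 4, so the last index is 3
    return mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()  # [[True, True, True, True, False, False]]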
class WavaVecaPreTrainer( Trainer ):
    '''simple docstring'''

    def __init__( self , *args , max_gumbel_temp=1 , min_gumbel_temp=0 , gumbel_temp_decay=1.0 , **kwargs ) -> int:
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step( self , model : nn.Module , inputs : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
        '''simple docstring'''
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
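# A small standalone sketch (not called by the trainer) of the temperature
# schedule applied above: exponential decay from the maximum, clamped at the
# minimum, as a function of the number of update steps.
def _gumbel_temperature_sketch(step , max_temp=2.0 , min_temp=0.5 , decay=0.999995 ):
    return max(max_temp * decay**step , min_temp )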
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args , training_args )
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""" , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""" , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}""" , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=True )
    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        batch["speech"] , batch["sampling_rate"] = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(batch ):
        return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'" )
    model = WavaVecaForPreTraining(config )
    data_collator = DataCollatorForWavaVecaPretraining(model=model , feature_extractor=feature_extractor )
    trainer = WavaVecaPreTrainer(
        model=model , data_collator=data_collator , args=training_args , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=feature_extractor , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
    trainer.train()
if __name__ == "__main__":
main()
| 241 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self : int ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_full_tokenizer( self : Any ) -> int:
        '''simple docstring'''
        tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained( self : Optional[Any] ) -> Dict:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest( unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = '''facebook/mbart-large-en-ro'''
    src_text = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
        ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
    ]
    tgt_text = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
        '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
        ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
        ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
    ]
    expected_src_tokens = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]
@classmethod
    def setUpClass( cls : Optional[Any] ) -> List[Any]:
        '''simple docstring'''
        cls.tokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
        cls.pad_token_id = 1
        return cls
    def test_language_codes( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020 )
    def test_enro_tokenizer_batch_encode_plus( self : Any ) -> Tuple:
        '''simple docstring'''
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
    def test_enro_tokenizer_decode_ignores_language_codes( self : Any ) -> str:
        '''simple docstring'''
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_enro_tokenizer_truncation( self : Tuple ) -> Any:
        '''simple docstring'''
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0] , str )
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , EN_CODE )
        self.assertEqual(len(ids ) , desired_max_length )
    def test_mask_token( self : int ) -> int:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [250026, 250001] )
    def test_special_tokens_unaffected_by_save_load( self : Any ) -> Any:
        '''simple docstring'''
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MBartTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
    def test_batch_fairseq_parity( self : List[Any] ) -> int:
        '''simple docstring'''
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="pt" )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
    def test_enro_tokenizer_prepare_batch( self : Any ) -> Optional[int]:
        '''simple docstring'''
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 14) , batch.input_ids.shape )
        self.assertEqual((2, 14) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
    def test_seq2seq_max_length( self : Union[str, Any] ) -> Union[str, Any]:
        '''simple docstring'''
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="pt" )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="pt" )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
    def test_tokenizer_translation( self : Optional[int] ) -> Any:
        '''simple docstring'''
        inputs = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
        self.assertEqual(
            nested_simplify(inputs ) , {
# A, test, EOS, en_XX
"input_ids": [[62, 3034, 2, 250004]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 250001,
} , )
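# A standalone sketch (hypothetical ids) of the shift_tokens_right behaviour the
# batch tests above rely on: for MBart, the final non-pad token of each label row
# (the language code) is rotated to the front to form the decoder inputs.
def _shift_tokens_right_sketch():
    import torch  # local import, mirroring the guarded torch import above

    labels = torch.tensor([[8274, 2, RO_CODE]] )  # token, eos, language code
    return shift_tokens_right(labels , 1 )  # expected: [[RO_CODE, 8274, 2]]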
| 241 | 1 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_lowercase : List[Any] = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester( unittest.TestCase ):
@classmethod
    def setUpClass( cls : List[Any] ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass( cls : Optional[Any] ):
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
    def test_push_to_hub( self : Any ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        model = FlaxBertModel(config )
        model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
        new_model = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1E-3 , msg=F'''{key} not identical''' )
        # Reset repo
        delete_repo(token=self._token , repo_id='''test-model-flax''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir , repo_id='''test-model-flax''' , push_to_hub=True , use_auth_token=self._token )
        new_model = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1E-3 , msg=F'''{key} not identical''' )
    def test_push_to_hub_in_organization( self : Optional[int] ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        model = FlaxBertModel(config )
        model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
        new_model = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1E-3 , msg=F'''{key} not identical''' )
        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=True , use_auth_token=self._token )
        new_model = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1E-3 , msg=F'''{key} not identical''' )
def check_models_equal(model_a , model_b ):
    models_are_equal = True
    flat_params_a = flatten_dict(model_a.params )
    flat_params_b = flatten_dict(model_b.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1E-4:
            models_are_equal = False
    return models_are_equal
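# A quick usage sketch (tiny hypothetical config) for the helper above: two
# models initialised from the same seed share parameters, so the check passes.
def _check_models_equal_sketch():
    config = BertConfig(vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=16 )
    model_a = FlaxBertModel(config , seed=0 )
    model_b = FlaxBertModel(config , seed=0 )
    return check_models_equal(model_a , model_b )  # expected: True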
@require_flax
class FlaxModelUtilsTest( unittest.TestCase ):
    def test_model_from_pretrained_subfolder( self : Optional[int] ):
        config = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        model = FlaxBertModel(config )
        subfolder = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir , subfolder ) )
            with self.assertRaises(OSError ):
                _ = FlaxBertModel.from_pretrained(tmp_dir )
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )
        self.assertTrue(check_models_equal(model , model_loaded ) )

    def test_model_from_pretrained_subfolder_sharded( self : List[Any] ):
        config = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        model = FlaxBertModel(config )
        subfolder = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir , subfolder ) , max_shard_size='''10KB''' )
            with self.assertRaises(OSError ):
                _ = FlaxBertModel.from_pretrained(tmp_dir )
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )
        self.assertTrue(check_models_equal(model , model_loaded ) )

    def test_model_from_pretrained_hub_subfolder( self : Optional[int] ):
        subfolder = '''bert'''
        model_id = '''hf-internal-testing/tiny-random-bert-subfolder'''
        with self.assertRaises(OSError ):
            _ = FlaxBertModel.from_pretrained(model_id )
        model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )
        self.assertIsNotNone(model )

    def test_model_from_pretrained_hub_subfolder_sharded( self : List[str] ):
        subfolder = '''bert'''
        model_id = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
        with self.assertRaises(OSError ):
            _ = FlaxBertModel.from_pretrained(model_id )
        model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )
        self.assertIsNotNone(model )
| 332 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
"https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    """simple docstring"""
    url = '''https://pypi.org/pypi/diffusers/json'''
    releases = json.loads(request.urlopen(url ).read() )['''releases'''].keys()
    return sorted(releases , key=lambda release : version.Version(release ) )


def init_hf_modules():
    """simple docstring"""
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE )
    os.makedirs(HF_MODULES_CACHE , exist_ok=True )
    init_path = Path(HF_MODULES_CACHE ) / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike] ):
    """simple docstring"""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE ) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(dynamic_module_path , exist_ok=True )
    init_path = dynamic_module_path / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file ):
    """simple docstring"""
    with open(module_file , '''r''' , encoding='''utf-8''' ) as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r'''^\s*import\s+\.(\S+)\s*$''' , content , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'''^\s*from\s+\.(\S+)\s+import''' , content , flags=re.MULTILINE )
    # Unique-ify
    return list(set(relative_imports ) )
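# A self-contained illustration (sample module text, not a real file) of what
# the two patterns above capture: both forms yield the bare relative name.
_SAMPLE_MODULE_TEXT = "import .pipeline_utils\nfrom .schedulers import DDPMScheduler\n"
assert sorted(
    set(
        re.findall(r'''^\s*import\s+\.(\S+)\s*$''' , _SAMPLE_MODULE_TEXT , flags=re.MULTILINE )
        + re.findall(r'''^\s*from\s+\.(\S+)\s+import''' , _SAMPLE_MODULE_TEXT , flags=re.MULTILINE )
    )
) == ["pipeline_utils", "schedulers"]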
def get_relative_import_files(module_file ):
    """simple docstring"""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f ) )
        module_path = Path(module_file ).parent
        new_import_files = [str(module_path / m ) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [F'''{f}.py''' for f in new_import_files]
        no_change = len(new_import_files ) == 0
        all_relative_imports.extend(files_to_check )
    return all_relative_imports
def check_imports(filename ):
    """simple docstring"""
    with open(filename , '''r''' , encoding='''utf-8''' ) as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r'''^\s*import\s+(\S+)\s*$''' , content , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'''^\s*from\s+(\S+)\s+import''' , content , flags=re.MULTILINE )
    # Only keep the top-level module
    imports = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
    # Unique-ify and test we got them all
    imports = list(set(imports ) )
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp )
        except ImportError:
            missing_packages.append(imp )
    if len(missing_packages ) > 0:
        raise ImportError(
            '''This modeling file requires the following packages that were not found in your environment: '''
            F'''{', '.join(missing_packages )}. Run `pip install {' '.join(missing_packages )}`''' )
    return get_relative_imports(filename )
def get_class_in_module(class_name , module_path ):
    """simple docstring"""
    module_path = module_path.replace(os.path.sep , '''.''' )
    module = importlib.import_module(module_path )
    if class_name is None:
        return find_pipeline_class(module )
    return getattr(module , class_name )


def find_pipeline_class(loaded_module ):
    """simple docstring"""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module , inspect.isclass ) )
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls , DiffusionPipeline )
            and cls.__module__.split('''.''' )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    F'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
                    F''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
                    F''' {loaded_module}.''' )
            pipeline_class = cls
    return pipeline_class
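# A toy sketch of the selection rule above (dummy inputs, no diffusers import):
# among a module's classes, pick the single one that subclasses `base` without
# being `base` itself; more than one candidate is treated as an error.
def _find_single_subclass_sketch(members , base ):
    found = None
    for name, cls in members.items():
        if inspect.isclass(cls ) and cls is not base and issubclass(cls , base ):
            if found is not None:
                raise ValueError('''expected exactly one candidate class''' )
            found = cls
    return found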
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike] ,
    module_file: str ,
    cache_dir: Optional[Union[str, os.PathLike]] = None ,
    force_download: bool = False ,
    resume_download: bool = False ,
    proxies: Optional[Dict[str, str]] = None ,
    use_auth_token: Optional[Union[bool, str]] = None ,
    revision: Optional[str] = None ,
    local_files_only: bool = False ,
):
    """simple docstring"""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    module_file_or_url = os.path.join(pretrained_model_name_or_path , module_file )
    if os.path.isfile(module_file_or_url ):
        resolved_module_file = module_file_or_url
        submodule = '''local'''
    elif pretrained_model_name_or_path.count('''/''' ) == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else '''main'''
            logger.info(F'''Defaulting to latest_version: {revision}.''' )
        elif revision in available_versions:
            revision = F'''v{revision}'''
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                F'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
                F''' {', '.join(available_versions + ['main'] )}.''' )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision , pipeline=pretrained_model_name_or_path )
        try:
            resolved_module_file = cached_download(
                github_url , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , )
            submodule = '''git'''
            module_file = pretrained_model_name_or_path + '''.py'''
        except EnvironmentError:
            logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , )
            submodule = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
        except EnvironmentError:
            logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file )
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule )
    submodule_path = Path(HF_MODULES_CACHE ) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file , submodule_path / module_file )
        for module_needed in modules_needed:
            module_needed = F'''{module_needed}.py'''
            shutil.copy(os.path.join(pretrained_model_name_or_path , module_needed ) , submodule_path / module_needed )
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token , str ):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path , revision=revision , token=token ).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule )
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file , submodule_path / module_file )
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path , F'''{module_needed}.py''' , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    return os.path.join(full_submodule , module_file )
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike] ,
    module_file: str ,
    class_name: Optional[str] = None ,
    cache_dir: Optional[Union[str, os.PathLike]] = None ,
    force_download: bool = False ,
    resume_download: bool = False ,
    proxies: Optional[Dict[str, str]] = None ,
    use_auth_token: Optional[Union[bool, str]] = None ,
    revision: Optional[str] = None ,
    local_files_only: bool = False ,
    **kwargs ,
):
    """simple docstring"""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    return get_class_in_module(class_name , final_module.replace('''.py''' , '''''' ) )
| 93 | 0 |
'''simple docstring'''
def decimal_to_fraction(decimal ) -> tuple[int, int]:
    try:
        decimal = float(decimal )
    except ValueError:
        raise ValueError('''Please enter a valid number''' )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        number_of_frac_digits = len(str(decimal ).split('''.''' )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        dividend , divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend , divisor = divisor, remainder
        numerator , denominator = numerator / divisor, denominator / divisor
        return int(numerator ), int(denominator )
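# Worked example of the reduction above: 6.25 has two fractional digits, so it
# first becomes 625/100; Euclid's algorithm on (100, 625) ends with divisor = 25,
# and dividing both terms by 25 gives the reduced fraction (25, 4).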
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
| 237 | '''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class a__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp( self : Optional[Any] ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , unk_token='''<unk>''' )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self : Optional[Any] , tokenizer : Dict ):
        """simple docstring"""
        input_text = '''this is a test'''
        output_text = '''this is a test'''
        return input_text, output_text

    def test_convert_token_and_id( self : List[str] ):
        """simple docstring"""
        token = '''<pad>'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab( self : List[Any] ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<pad>''' )
        self.assertEqual(vocab_keys[1] , '''<unk>''' )
        self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
        self.assertEqual(len(vocab_keys ) , 3_00_01 )

    def test_vocab_size( self : int ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
    def test_do_lower_case( self : Optional[Any] ):
        """simple docstring"""
        # fmt: off
        sequence = ''' \tHeLLo!how \n Are yoU? '''
        tokens_target = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )

    @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
    def test_sentencepiece_tokenize_and_convert_tokens_to_string( self : Dict ):
        """simple docstring"""
        pass

    @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
    def test_sentencepiece_tokenize_and_decode( self : List[Any] ):
        """simple docstring"""
        pass

    def test_split_by_punct( self : Union[str, Any] ):
        """simple docstring"""
        # fmt: off
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens_target = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , split_by_punct=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , split_by_punct=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )

    def test_do_lower_case_split_by_punct( self : Any ):
        """simple docstring"""
        # fmt: off
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens_target = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=True , split_by_punct=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=True , split_by_punct=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )

    def test_do_lower_case_split_by_punct_false( self : str ):
        """simple docstring"""
        # fmt: off
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens_target = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=True , split_by_punct=False )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=True , split_by_punct=False )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )

    def test_do_lower_case_false_split_by_punct( self : Optional[Any] ):
        """simple docstring"""
        # fmt: off
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens_target = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=False , split_by_punct=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=False , split_by_punct=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )

    def test_do_lower_case_false_split_by_punct_false( self : Optional[Any] ):
        """simple docstring"""
        # fmt: off
        sequence = ''' \tHeLLo!how \n Are yoU? '''
        tokens_target = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=False , split_by_punct=False )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=False , split_by_punct=False )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
    def test_rust_and_python_full_tokenizers( self : Optional[Any] ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )

    def test_full_tokenizer( self : Optional[Any] ):
        """simple docstring"""
        sequence = '''This is a test'''
        ids_target = [13, 1, 43_98, 25, 21, 12_89]
        tokens_target = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
        back_tokens_target = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , keep_accents=True )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , ids_target )
        tokens = tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , tokens_target )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(back_tokens , back_tokens_target )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(rust_ids , ids_target )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(rust_tokens , tokens_target )
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids )
        self.assertListEqual(rust_back_tokens , back_tokens_target )
        # fmt: off
        sequence = '''I was born in 92000, and this is falsé.'''
        ids_target = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
        tokens_target = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
        back_tokens_target = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
        # fmt: on
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , ids_target )
        tokens = tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , tokens_target )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(back_tokens , back_tokens_target )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(rust_ids , ids_target )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(rust_tokens , tokens_target )
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids )
        self.assertListEqual(rust_back_tokens , back_tokens_target )
    def test_sequence_builders( self : str ):
        """simple docstring"""
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB )
        text = tokenizer.encode('''sequence builders''' )
        text_a = tokenizer.encode('''multi-sequence build''' )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , encoded_sentence )
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , encoded_pair , )
    @slow
    def test_tokenizer_integration( self : Optional[int] ):
        """simple docstring"""
        # fmt: off
__lowerCamelCase = {'''input_ids''': [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            # `__lowerCamelCase` is the expected-encoding dict assigned on the line above
            expected_encoding=__lowerCamelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 237 | 1 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
lowercase__ : str = logging.getLogger(__name__)
lowercase__ : List[str] = 'Hello world! cécé herlolip'
BertAbsConfig = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints , dump_path ):
    """simple docstring"""
    original_args = BertAbsConfig(
        temp_dir='''.''', finetune_bert=False, large=False, share_emb=True, use_bert_emb=False, encoder='''bert''', max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, )
    checkpoints = torch.load(path_to_checkpoints , lambda storage , loc : storage )
    original = AbsSummarizer(original_args , torch.device('''cpu''' ) , checkpoints )
    original.eval()
    new_model = BertAbsSummarizer(original_args , torch.device('''cpu''' ) )
    new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------
    logging.info('''Make sure that the models\' outputs are identical''' )
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    # prepare the model inputs
    encoder_input_ids = tokenizer.encode('''This is sample éàalj\'-.''' )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids )) )
    encoder_input_ids = torch.tensor(encoder_input_ids ).unsqueeze(0 )
    decoder_input_ids = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids )) )
    decoder_input_ids = torch.tensor(decoder_input_ids ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src , tgt , segs , clss , mask_src , mask_tgt , mask_cls )[0]
    output_original_generator = original.generator(output_original_model )
    output_converted_model = new_model(
        encoder_input_ids , decoder_input_ids , token_type_ids , encoder_attention_mask , decoder_attention_mask )[0]
    output_converted_generator = new_model.generator(output_converted_model )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between weights: {:.2f}'''.format(maximum_absolute_difference ) )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between weights: {:.2f}'''.format(maximum_absolute_difference ) )
    are_identical = torch.allclose(output_converted_generator , output_original_generator , atol=1e-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict(), '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
lowercase__ : Any = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
lowercase__ : int = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
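# Illustrative invocation (the paths are hypothetical):
#
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path ./bertabs_cnndm_final.pt \
#       --pytorch_dump_folder_path ./bertabs-converted
#
# Note that the function above writes the converted weights to the hard-coded
# ./bertabs-finetuned-cnndm-extractive-abstractive-summarization/ directory.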
| 324 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k: str) -> str:
    """Translate a ParlAI parameter name into the matching Hugging Face name."""
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name )

    if k.startswith('''encoder''' ):
        k = k.replace('''.attn''', '''.self_attn''' )
        k = k.replace('''norm1''', '''self_attn_layer_norm''' )
        k = k.replace('''norm2''', '''final_layer_norm''' )
    elif k.startswith('''decoder''' ):
        k = k.replace('''norm1''', '''self_attn_layer_norm''' )
        k = k.replace('''norm2''', '''encoder_attn_layer_norm''' )
        k = k.replace('''norm3''', '''final_layer_norm''' )
    return k
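# For example (hypothetical ParlAI key, shown only to illustrate the mapping):
#   rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
#   -> "encoder.layers.0.self_attn.q_proj.weight"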
def rename_layernorm_keys(sd: dict) -> None:
    """Rename the embedding LayerNorm keys in-place (layernorm_embedding -> layer_norm)."""
    keys = [
        '''model.encoder.layernorm_embedding.weight''',
        '''model.encoder.layernorm_embedding.bias''',
        '''model.decoder.layernorm_embedding.weight''',
        '''model.decoder.layernorm_embedding.bias''',
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace('''layernorm_embedding''', '''layer_norm''' )
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ['START']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI Blenderbot checkpoint into the Hugging Face format."""
    model = torch.load(checkpoint_path, map_location='''cpu''' )
    sd = model['''model''']
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping, strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 324 | 1 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        obj = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , value )

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , '''feat_extract.json''' )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )

        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )

        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract )
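# A concrete suite would mix this into unittest.TestCase and point
# `feature_extraction_class` at a real extractor. Minimal sketch (the Wav2Vec2
# names are real transformers classes; the dict values are illustrative):
#
#   from transformers import Wav2Vec2FeatureExtractor
#
#   class Wav2Vec2FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#       feature_extraction_class = Wav2Vec2FeatureExtractor
#       feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000, "padding_value": 0.0}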
| 354 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''' )
    return _value


_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
def skip(test_case):
    """Decorator that skips a test unconditionally."""
    return unittest.skip('''Test was skipped''' )(test_case)


def slow(test_case):
    """Decorator marking a test as slow; skipped unless RUN_SLOW=1 is set in the environment."""
    return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(test_case)


def require_torch_min_version(test_case=None , version=None):
    if test_case is None:
        return partial(require_torch_min_version , version=version )
    return unittest.skipUnless(is_torch_version('''>=''' , version ) , f'''test requires torch version >= {version}''' )(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(test_case)
class TempDirTestCase(unittest.TestCase):
    """Keeps a single temporary directory open for the class and wipes its contents
    before each test when `clear_on_setup` is True."""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob('''**/*''' ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path )
class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def are_the_same_tensors(tensor):
    """Gathers `tensor` across processes and checks that every copy is identical."""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label="" ):
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label='''stdout:''' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label='''stderr:''' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )

    cmd_str = ''' '''.join(cmd )
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''' )
    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    """Runs `command` with `subprocess.check_output`, optionally returning its decoded
    stdout; raises a SubprocessCallException with the captured output on failure."""
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , '''decode''' ):
                output = output.decode('''utf-8''' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'''Command `{" ".join(command )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
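# Example (illustrative): capture the stdout of a short-lived process.
#
#   out = run_command(["echo", "hello"], return_stdout=True)
#   assert out.strip() == "hello"
#
# For long-running launchers, `execute_subprocess_async` plays the same role but
# streams stdout/stderr live instead of buffering them until the process exits.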
| 279 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase__ : Optional[Any] = logging.get_logger(__name__)
class UpperCamelCase__(BaseImageProcessor):
    model_input_names = ['''pixel_values''']

    def __init__(self, do_resize=True, size=None, resample=PIL.Image.BICUBIC, do_center_crop=True, crop_size=None, rescale_factor=1 / 255, do_rescale=True, do_normalize=True, image_mean=None, image_std=None, **kwargs, ):
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 256, 'width': 256}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , param_name='crop_size' )

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PIL.Image.BICUBIC, data_format=None, **kwargs, ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" )
        return resize(
            image , size=(size['height'], size['width']) , resample=resample , data_format=data_format , **kwargs )

    def center_crop(self, image, size, data_format=None, **kwargs, ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )

    def rescale(self, image, scale, data_format=None, **kwargs, ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize(self, image, mean, std, data_format=None, **kwargs, ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' )

        images = make_list_of_images(images )

        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )

        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]

        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]

        images = [to_channel_dimension_format(image , data_format ) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
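# Minimal usage sketch for the processor above (its placeholder class name is kept
# from this file; any RGB PIL image works):
#
#   processor = UpperCamelCase__()      # defaults: resize to 256x256, center-crop to 224x224
#   image = PIL.Image.new("RGB", (640, 480))
#   batch = processor.preprocess(image, return_tensors="np")
#   batch["pixel_values"].shape         # -> (1, 3, 224, 224)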
| 224 |
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError('''Number should not be negative.''' )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
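    # illustrative sanity checks: 0! == 1 and 5! == 120; thanks to @lru_cache,
    # repeated calls hit the memoized results instead of recursing again
    assert factorial(0) == 1
    assert factorial(5) == 120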
| 278 | 0 |
'''simple docstring'''
class A:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i , j , visited )
                    count += 1
        return count
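# Usage sketch: a 1 marks land, a 0 marks water; diagonal neighbours join islands.
#
#   graph = [
#       [1, 1, 0, 0],
#       [0, 1, 0, 0],
#       [0, 0, 0, 1],
#   ]
#   A(3, 4, graph).count_islands()  # -> 2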
| 179 |
'''
Divide-and-conquer algorithm that finds the closest pair among a set of 2D
points in O(n log n); squared distances are compared internally so the square
root is taken only once at the end.
'''


def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array , key=lambda x : x[column] )


def dis_between_closest_pair(points, points_counts, min_dis=float('''inf''' )):
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float('''inf''' )):
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
if __name__ == "__main__":
points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points)))
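# Expected output: the closest pair in the demo above is (2, 3) and (3, 4),
# so this prints "Distance: 1.4142135623730951" (i.e. sqrt(2)).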
| 179 | 1 |
def prefix_function(input_string: str) -> list:
    """
    Knuth-Morris-Pratt prefix function: result[i] is the length of the longest
    proper prefix of input_string[: i + 1] that is also a suffix of it.
    """
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Largest value of the prefix function over the whole string."""
    return max(prefix_function(input_string ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
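    # illustrative check: for "aabcdaabc" the prefix function is
    # [0, 1, 0, 0, 0, 1, 2, 3, 4], so the longest prefix value is 4
    assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
    assert longest_prefix("aabcdaabc") == 4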
| 131 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32 ).newbyteorder('''>''' )
    return numpy.frombuffer(bytestream.read(4 ) , dtype=dt )[0]


@deprecated(None , '''Please use tf.data to implement this functionality.''' )
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print('''Extracting''' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 2_051:
            raise ValueError(
                '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
        num_images = _read32(bytestream )
        rows = _read32(bytestream )
        cols = _read32(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf , dtype=numpy.uint8 )
        data = data.reshape(num_images , rows , cols , 1 )
        return data


@deprecated(None , '''Please use tf.one_hot on tensors.''' )
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None , '''Please use tf.data to implement this functionality.''' )
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print('''Extracting''' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 2_049:
            raise ValueError(
                '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
        num_items = _read32(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf , dtype=numpy.uint8 )
        if one_hot:
            return _dense_to_one_hot(labels , num_classes )
        return labels
class _DataSet:
    @deprecated(
        None , '''Please use alternatives such as official/mnist/_DataSet.py'''
        ''' from tensorflow/models.''' , )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None, ):
        seed1, seed2 = random_seed.get_seed(seed )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2 )
        dtype = dtypes.as_dtype(dtype ).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), F'images.shape: {images.shape} labels.shape: {labels.shape}'
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32 )
                images = numpy.multiply(images , 1.0 / 255.0 )
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size )],
                [fake_label for _ in range(batch_size )],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples )
            numpy.random.shuffle(perm0 )
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples )
                numpy.random.shuffle(perm )
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None , '''Please write your own downloading logic.''' )
def _maybe_download(filename, work_directory, source_url):
    if not gfile.Exists(work_directory ):
        gfile.MakeDirs(work_directory )
    filepath = os.path.join(work_directory , filename )
    if not gfile.Exists(filepath ):
        urllib.request.urlretrieve(source_url , filepath )  # noqa: S310
        with gfile.GFile(filepath ) as f:
            size = f.size()
        print('''Successfully downloaded''' , filename , size , '''bytes.''' )
    return filepath


@deprecated(
    None , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, validation_size=5_000, seed=None, source_url=DEFAULT_SOURCE_URL, ):
    if fake_data:

        def fake():
            return _DataSet(
                [] , [] , fake_data=True , one_hot=one_hot , dtype=dtype , seed=seed )

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train , validation=validation , test=test )

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = '''train-images-idx3-ubyte.gz'''
    train_labels_file = '''train-labels-idx1-ubyte.gz'''
    test_images_file = '''t10k-images-idx3-ubyte.gz'''
    test_labels_file = '''t10k-labels-idx1-ubyte.gz'''

    local_file = _maybe_download(
        train_images_file , train_dir , source_url + train_images_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        train_images = _extract_images(f )

    local_file = _maybe_download(
        train_labels_file , train_dir , source_url + train_labels_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        train_labels = _extract_labels(f , one_hot=one_hot )

    local_file = _maybe_download(
        test_images_file , train_dir , source_url + test_images_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        test_images = _extract_images(f )

    local_file = _maybe_download(
        test_labels_file , train_dir , source_url + test_labels_file )
    with gfile.Open(local_file , '''rb''' ) as f:
        test_labels = _extract_labels(f , one_hot=one_hot )

    if not 0 <= validation_size <= len(train_images ):
        msg = (
            '''Validation size should be between 0 and '''
            F'{len(train_images )}. Received: {validation_size}.'
        )
        raise ValueError(msg )

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}

    train = _DataSet(train_images , train_labels , **options )
    validation = _DataSet(validation_images , validation_labels , **options )
    test = _DataSet(test_images , test_labels , **options )

    return _Datasets(train=train , validation=validation , test=test )
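# Usage sketch (downloads the four MNIST archives on first run; the directory
# path is illustrative):
#
#   mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
#   images, labels = mnist.train.next_batch(64)
#   images.shape   # -> (64, 784), pixel values scaled to [0.0, 1.0]
#   labels.shape   # -> (64, 10)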
| 131 | 1 |
g = 9.80665


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Buoyant force on an object: fluid density * gravity * displaced volume."""
    if fluid_density <= 0:
        raise ValueError('''Impossible fluid density''' )
    if volume < 0:
        raise ValueError('''Impossible Object volume''' )
    if gravity <= 0:
        raise ValueError('''Impossible Gravity''' )
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
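    # illustrative check: 0.5 m^3 of water (997 kg/m^3) displaces about 4888.6 N
    print(archimedes_principle(fluid_density=997, volume=0.5))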
| 368 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    batch_params = ['''image''']
    params = ['''image''']
    required_optional_params = [
        '''num_images_per_prompt''',
        '''num_inference_steps''',
        '''generator''',
        '''latents''',
        '''guidance_scale''',
        '''frame_size''',
        '''output_type''',
        '''return_dict''',
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        model = CLIPVisionModel(config )
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
        return image_processor
@property
    def dummy_prior(self):
        torch.manual_seed(0 )

        model_kwargs = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
        model = PriorTransformer(**model_kwargs )
        return model
@property
    def dummy_renderer(self):
        torch.manual_seed(0 )

        model_kwargs = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs )
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            '''prior''': prior,
            '''image_encoder''': image_encoder,
            '''image_processor''': image_processor,
            '''renderer''': renderer,
            '''scheduler''': scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''image''': input_image,
            '''generator''': generator,
            '''num_inference_steps''': 1,
            '''frame_size''': 32,
            '''output_type''': '''np''',
        }
        return inputs
    def test_shap_e_img2img(self):
        device = '''cpu'''

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )

        pipe.set_progress_bar_config(disable=None )

        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_batch_consistent(self):
        # only test on smaller batch sizes to keep the runtime reasonable
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_img2img_out.npy''' )
        pipe = ShapEImg2ImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        generator = torch.Generator(device=torch_device ).manual_seed(0 )

        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images , expected_image )
| 93 | 0 |
"""simple docstring"""
def solution() -> int:
    """Compute d_1 * d_10 * d_100 * ... * d_1000000 for the digits d_n of the
    Champernowne constant 0.123456789101112... (Project Euler problem 40)."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i ) )
        i += 1

    constant = "".join(constant )

    return (
        int(constant[0] )
        * int(constant[9] )
        * int(constant[99] )
        * int(constant[999] )
        * int(constant[9999] )
        * int(constant[99999] )
        * int(constant[999999] )
    )
if __name__ == "__main__":
print(solution())
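# The concatenation "123456789101112..." gives d_1=1, d_10=1, d_100=5, d_1000=3,
# d_10000=7, d_100000=2 and d_1000000=1, so the line above prints 1*1*5*3*7*2*1 = 210.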
| 256 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, """src""", """diffusers""")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend(''' if not is_torch_available():''' )
        self.assertEqual(simple_backend , '''torch''' )

        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
        self.assertEqual(double_backend , '''torch_and_transformers''' )

        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            ''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
        self.assertEqual(triple_backend , '''torch_and_transformers_and_onnx''' )

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('''torch''' , objects )
        self.assertIn('''torch_and_transformers''' , objects )
        self.assertIn('''flax_and_transformers''' , objects )
        self.assertIn('''torch_and_transformers_and_onnx''' , objects )

        # Likewise, we can't assert on the exact content of a key
        self.assertIn('''UNet2DModel''' , objects['''torch'''] )
        self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
        self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
        self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
        self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
        self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
        self.assertEqual(dummy_constant , '''\nCONSTANT = None\n''' )

        dummy_function = create_dummy_object('''function''' , '''\'torch\'''' )
        self.assertEqual(
            dummy_function , '''\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n''' )

        expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
    _backends = \'torch\'

    def __init__(self, *args, **kwargs):
        requires_backends(self, \'torch\')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, \'torch\')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, \'torch\')
'''
        dummy_class = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
        self.assertEqual(dummy_class , expected_dummy_class )

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
'''
        dummy_files = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
        self.assertEqual(dummy_files['''torch'''] , expected_dummy_pytorch_file )
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if not scores:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height ), minimax(depth + 1, node_index * 2 + 1, False, scores, height ), )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height ), minimax(depth + 1, node_index * 2 + 1, True, scores, height ), )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ), 2 )
    print(F"""Optimal value : {minimax(0, 0, True, scores, height )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
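    # For the scores above the game tree alternates max/min levels:
    # depth-2 maxima (90, 33, 65, 34423) -> depth-1 minima (33, 65) -> root max 65,
    # so this prints "Optimal value : 65".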
| 360 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = '''gelu'''

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_mbart_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config ).get_decoder()
        input_ids = inputs_dict['''input_ids''']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id ), tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.int8 ),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFMBartForConditionalGeneration,
            '''feature-extraction''': TFMBartModel,
            '''summarization''': TFMBartForConditionalGeneration,
            '''text2text-generation''': TFMBartForConditionalGeneration,
            '''translation''': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MBartConfig )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
    ]
    expected_text = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
    ]
    model_name = '''facebook/mbart-large-en-ro'''

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        self.assertListEqual(self.expected_text , generated_words )

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , return_tensors='''tf''' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        generated_words = self.tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 35 | 0 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Segment the region described by `text` with CLIPSeg to build the inpainting mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)

        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
| 313 |
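# A hypothetical usage sketch for the text-guided inpainting pipeline above, following the
# diffusers community-pipeline loading pattern; the checkpoint ids, file names, and the
# `custom_pipeline` path are assumptions, not part of the original row.
import torch
from diffusers import DiffusionPipeline
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
from PIL import Image

segmentation_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
segmentation_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="text_inpainting",
    segmentation_model=segmentation_model,
    segmentation_processor=segmentation_processor,
    torch_dtype=torch.float16,
).to("cuda")

image = Image.open("bench.png").convert("RGB").resize((512, 512))
# `text` selects the region to replace (via CLIPSeg); `prompt` describes what to paint there.
result = pipe(prompt="a cup of coffee", image=image, text="a glass").images[0]
result.save("inpainted.png")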
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split a square matrix of even size into four equally sized quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply two square power-of-two matrices using seven sub-products."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))

    # Work on padded copies so the caller's matrices are not mutated.
    new_matrix1 = [row[:] for row in matrix1]
    new_matrix2 = [row[:] for row in matrix2]

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros: the true result is dimension1[0] x dimension2[1].
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 313 | 1 |
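# Sanity-check sketch for the Strassen implementation above (not part of the dataset row):
# compare the divide-and-conquer result against a naive O(n^3) triple loop.
def naive_multiply(a, b):
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
        for i in range(len(a))
    ]

m1 = [[2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7]]
m2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
assert strassen(m1, m2) == naive_multiply(m1, m2)  # non-square inputs are zero-padded internally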
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
| 370 |
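# A small sketch, assuming the standard transformers API, of how the configuration above is
# typically used: instantiate it with defaults and build a randomly initialised model.
from transformers import LayoutLMv3Config, LayoutLMv3Model

config = LayoutLMv3Config()        # defaults: vocab_size=50265, hidden_size=768, 12 layers
model = LayoutLMv3Model(config)    # random weights; use from_pretrained() for trained ones
print(config.max_2d_position_embeddings)  # 1024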
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 248 | 0 |
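# The ratio property at the end of the config above is just the product of the feature
# extractor's per-layer strides; re-derived standalone for illustration.
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # UniSpeech defaults
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 -> one logit frame per 320 samples (20 ms at 16 kHz)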
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowercase__ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class A_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Any , lowercase_ : Any , lowercase_ : Dict=7 , lowercase_ : List[Any]=3 , lowercase_ : Tuple=18 , lowercase_ : str=30 , lowercase_ : Optional[int]=400 , lowercase_ : str=None , lowercase_ : Any=True , lowercase_ : int=True , lowercase_ : int=None , ) -> Union[str, Any]:
UpperCAmelCase : Optional[Any] = size if size is not None else {'height': 20, 'width': 20}
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : Tuple = batch_size
UpperCAmelCase : Any = num_channels
UpperCAmelCase : Tuple = image_size
UpperCAmelCase : Tuple = min_resolution
UpperCAmelCase : List[str] = max_resolution
UpperCAmelCase : Any = size
UpperCAmelCase : str = do_normalize
UpperCAmelCase : Union[str, Any] = do_convert_rgb
UpperCAmelCase : Any = [512, 1_024, 2_048, 4_096]
UpperCAmelCase : Tuple = patch_size if patch_size is not None else {'height': 16, 'width': 16}
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
UpperCAmelCase : Tuple = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
UpperCAmelCase : Optional[Any] = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = PixaStructImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase : Any = PixaStructImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self : str ) -> List[str]:
UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase_ , 'do_convert_rgb' ) )
    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]:
# Initialize image_processor
UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
UpperCAmelCase : Tuple = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase : str = image_processor(
lowercase_ , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
# Initialize image_processor
UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
UpperCAmelCase : Optional[Any] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase : Any = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Optional[int] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
UpperCAmelCase : Tuple = 'Hello'
UpperCAmelCase : Tuple = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowercase_ , header_text=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase : List[Any] = image_processor(
lowercase_ , return_tensors='pt' , max_patches=lowercase_ , header_text=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
# Initialize image_processor
UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
UpperCAmelCase : Any = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase : List[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase : Tuple = image_processor(
lowercase_ , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
# Initialize image_processor
UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
UpperCAmelCase : Optional[int] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase : str = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase : Optional[Any] = image_processor(
lowercase_ , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = PixaStructImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase : str = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCAmelCase : Dict = 3
@property
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase_ , 'do_convert_rgb' ) )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
# Initialize image_processor
UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
UpperCAmelCase : List[str] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase : Tuple = image_processor(
lowercase_ , return_tensors='pt' , max_patches=lowercase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 151 |
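# The `expected_hidden_dim` used throughout the tests above is the length of one flattened
# patch: two positional ids (row, column) plus the raw patch pixels. Computed standalone:
patch_height = patch_width = 16
num_channels = 3
expected_hidden_dim = patch_height * patch_width * num_channels + 2
print(expected_hidden_dim)  # 770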
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 151 | 1 |
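# A sketch, assuming the standard transformers API, of the left-padding behaviour the last
# test above verifies: with padding_side="left", batched generation matches generating each
# sentence on its own, because the attention mask hides the pads and new tokens append on the right.
from transformers import TFXGLMForCausalLM, XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
tokenizer.padding_side = "left"
model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

inputs = tokenizer(["Hello, my dog is a little", "Today is"], return_tensors="tf", padding=True)
outputs = model.generate(
    input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=12
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))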
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {'''vocab_file''': '''spiece.model'''}
lowerCamelCase = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
lowerCamelCase = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
class _a ( _UpperCamelCase):
_a : Tuple = VOCAB_FILES_NAMES
_a : Dict = PRETRAINED_VOCAB_FILES_MAP
_a : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : int = ['input_ids', 'attention_mask']
_a : List[int] = []
def __init__( self : int , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str]="<unk>" , _SCREAMING_SNAKE_CASE : str="<s>" , _SCREAMING_SNAKE_CASE : Any="</s>" , _SCREAMING_SNAKE_CASE : Any="<pad>" , _SCREAMING_SNAKE_CASE : Tuple="[SEP]" , _SCREAMING_SNAKE_CASE : int="[MASK]" , _SCREAMING_SNAKE_CASE : List[str]="[CLS]" , _SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **_SCREAMING_SNAKE_CASE : List[str] , )-> Optional[int]:
lowerCAmelCase__ : List[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token
lowerCAmelCase__ : Dict = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token
lowerCAmelCase__ : List[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
lowerCAmelCase__ : List[str] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token
lowerCAmelCase__ : Dict = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token
lowerCAmelCase__ : Optional[int] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase__ : Optional[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
lowerCAmelCase__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
lowerCAmelCase__ : int = vocab_file
lowerCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCAmelCase )
@property
def UpperCAmelCase__( self : Union[str, Any] )-> Union[str, Any]:
return self.sp_model.get_piece_size()
def UpperCAmelCase__( self : str )-> List[str]:
lowerCAmelCase__ : Dict = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] )-> int:
lowerCAmelCase__ : List[str] = self.__dict__.copy()
lowerCAmelCase__ : Union[str, Any] = None
return state
def __setstate__( self : List[Any] , _SCREAMING_SNAKE_CASE : Dict )-> Any:
lowerCAmelCase__ : List[str] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase__ : Optional[Any] = {}
lowerCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : str )-> Tuple:
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def UpperCAmelCase__( self : str , _SCREAMING_SNAKE_CASE : Any )-> Union[str, Any]:
return self.sp_model.piece_to_id(_UpperCAmelCase )
def UpperCAmelCase__( self : List[Any] , _SCREAMING_SNAKE_CASE : Tuple )-> int:
lowerCAmelCase__ : Tuple = self.sp_model.IdToPiece(_UpperCAmelCase )
return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
def UpperCAmelCase__( self : List[Any] , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : bool = None , _SCREAMING_SNAKE_CASE : bool = True , **_SCREAMING_SNAKE_CASE : List[str] , )-> int:
lowerCAmelCase__ : Optional[Any] = kwargs.pop('''use_source_tokenizer''' , _UpperCAmelCase )
lowerCAmelCase__ : List[Any] = self.convert_ids_to_tokens(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCAmelCase__ : str = []
lowerCAmelCase__ : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_UpperCAmelCase ) )
lowerCAmelCase__ : Dict = []
sub_texts.append(_UpperCAmelCase )
else:
current_sub_text.append(_UpperCAmelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_UpperCAmelCase ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
lowerCAmelCase__ : List[str] = re.sub(r''' (\[(MASK|SEP)\])''' , r'''\1''' , ''' '''.join(_UpperCAmelCase ) )
else:
lowerCAmelCase__ : str = ''.join(_UpperCAmelCase )
lowerCAmelCase__ : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCAmelCase__ : List[str] = self.clean_up_tokenization(_UpperCAmelCase )
return clean_text
else:
return text
def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[str] = None )-> List[Any]:
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase__ : str = os.path.join(
_UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , '''wb''' ) as fi:
lowerCAmelCase__ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
def UpperCAmelCase__( self : List[str] , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : Optional[List[int]] = None )-> Optional[Any]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase__ : Any = [self.cls_token_id]
lowerCAmelCase__ : str = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__( self : str , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : Optional[List[int]] = None , _SCREAMING_SNAKE_CASE : bool = False )-> int:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
| 369 |
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # mark the edge as used in both directions (undirected graph)
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node  # Euler cycle
    if odd_degree_nodes == 2:
        return 2, odd_node  # Euler path
    return 3, odd_node  # neither


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
| 211 | 0 |
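# The degree test behind check_circuit_or_path above, demonstrated standalone: an undirected
# graph has an Euler cycle iff no vertex has odd degree, and an Euler path iff exactly two do.
graph = {1: [2, 3], 2: [1, 3], 3: [1, 2]}  # triangle: every vertex has even degree
check, odd_node = check_circuit_or_path(graph, 10)
print(check)  # 1 -> Euler cycle exists; 2 would mean Euler path, 3 means neither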
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
| 241 |
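# A small sketch (standard transformers API assumed) of the PoolFormer configuration above in use.
from transformers import PoolFormerConfig, PoolFormerModel

config = PoolFormerConfig()       # defaults: hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 6, 2]
model = PoolFormerModel(config)   # randomly initialised weights
print(sum(config.depths))         # 12 encoder blocks in total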
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
def lowerCamelCase ( self : List[Any] ):
torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def lowerCamelCase ( self : Tuple ):
torch.manual_seed(0 )
lowerCAmelCase_ : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def lowerCamelCase ( self : List[str] ):
torch.manual_seed(0 )
lowerCAmelCase_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(a_ )
@property
def lowerCamelCase ( self : Union[str, Any] ):
def extract(*a_ : Tuple , **a_ : Tuple ):
class __lowerCamelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ):
lowerCAmelCase_ : List[str] = torch.ones([0] )
def lowerCamelCase ( self : str , a_ : Optional[int] ):
self.pixel_values.to(a_ )
return self
return Out()
return extract
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : List[Any] = self.dummy_cond_unet
lowerCAmelCase_ : List[Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
lowerCAmelCase_ : List[Any] = self.dummy_vae
lowerCAmelCase_ : List[str] = self.dummy_text_encoder
lowerCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ : Optional[Any] = StableDiffusionPipeline(
unet=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , safety_checker=a_ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ : Union[str, Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : str = "A painting of a squirrel eating a burger"
lowerCAmelCase_ : Any = torch.Generator(device=a_ ).manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = sd_pipe([prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
lowerCAmelCase_ : str = output.images
lowerCAmelCase_ : Dict = torch.Generator(device=a_ ).manual_seed(0 )
lowerCAmelCase_ : str = sd_pipe(
[prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=a_ , )[0]
lowerCAmelCase_ : str = image[0, -3:, -3:, -1]
lowerCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ : Any = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase ( self : List[Any] ):
lowerCAmelCase_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : Union[str, Any] = self.dummy_cond_unet
lowerCAmelCase_ : Any = PNDMScheduler(skip_prk_steps=a_ )
lowerCAmelCase_ : List[Any] = self.dummy_vae
lowerCAmelCase_ : List[str] = self.dummy_text_encoder
lowerCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ : List[str] = StableDiffusionPipeline(
unet=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , safety_checker=a_ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Optional[Any] = "A painting of a squirrel eating a burger"
lowerCAmelCase_ : List[Any] = torch.Generator(device=a_ ).manual_seed(0 )
lowerCAmelCase_ : Any = sd_pipe([prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
lowerCAmelCase_ : Union[str, Any] = output.images
lowerCAmelCase_ : List[str] = torch.Generator(device=a_ ).manual_seed(0 )
lowerCAmelCase_ : Optional[int] = sd_pipe(
[prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=a_ , )[0]
lowerCAmelCase_ : Dict = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=a_ )
assert isinstance(a_ , a_ )
assert isinstance(pipe.scheduler , a_ )
assert pipe.safety_checker is None
lowerCAmelCase_ : str = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
lowerCAmelCase_ : List[str] = StableDiffusionPipeline.from_pretrained(a_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCAmelCase_ : Any = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : str = self.dummy_cond_unet
lowerCAmelCase_ : str = PNDMScheduler(skip_prk_steps=a_ )
lowerCAmelCase_ : Tuple = self.dummy_vae
lowerCAmelCase_ : Dict = self.dummy_text_encoder
lowerCAmelCase_ : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
lowerCAmelCase_ : int = unet.half()
lowerCAmelCase_ : Dict = vae.half()
lowerCAmelCase_ : List[Any] = bert.half()
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ : Optional[int] = StableDiffusionPipeline(
unet=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , safety_checker=a_ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : List[str] = "A painting of a squirrel eating a burger"
lowerCAmelCase_ : Optional[Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def tearDown( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=a_ )
lowerCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCAmelCase_ : str = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : List[str] = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
lowerCAmelCase_ : Optional[int] = 40_03_66_03_46
lowerCAmelCase_ : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
lowerCAmelCase_ : Union[str, Any] = torch.manual_seed(a_ )
lowerCAmelCase_ : Union[str, Any] = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCAmelCase_ : Union[str, Any] = output.images
lowerCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Union[str, Any] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# with safety guidance (strong configuration)
lowerCAmelCase_ : List[str] = torch.manual_seed(a_ )
lowerCAmelCase_ : Any = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase_ : Optional[Any] = output.images
lowerCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Optional[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase ( self : str ):
lowerCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=a_ )
lowerCAmelCase_ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCAmelCase_ : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Union[str, Any] = "padme amidala taking a bath artwork, safe for work, no nudity"
lowerCAmelCase_ : Union[str, Any] = 27_34_97_17_55
lowerCAmelCase_ : Union[str, Any] = 7
lowerCAmelCase_ : str = torch.manual_seed(a_ )
lowerCAmelCase_ : Dict = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCAmelCase_ : Any = output.images
lowerCAmelCase_ : int = image[0, -3:, -3:, -1]
lowerCAmelCase_ : int = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
lowerCAmelCase_ : Optional[int] = torch.manual_seed(a_ )
lowerCAmelCase_ : Union[str, Any] = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase_ : Any = output.images
lowerCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Tuple = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase ( self : Union[str, Any] ):
lowerCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
lowerCAmelCase_ : Any = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Tuple = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
lowerCAmelCase_ : List[Any] = 10_44_35_52_34
lowerCAmelCase_ : Dict = 12
lowerCAmelCase_ : int = torch.manual_seed(a_ )
lowerCAmelCase_ : List[str] = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCAmelCase_ : int = output.images
lowerCAmelCase_ : int = image[0, -3:, -3:, -1]
lowerCAmelCase_ : int = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
lowerCAmelCase_ : int = torch.manual_seed(a_ )
lowerCAmelCase_ : Any = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase_ : Optional[Any] = output.images
lowerCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : str = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
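# A hedged usage sketch (not from the original test file): the nightly tests above
# drive Safe Latent Diffusion purely through keyword arguments on the pipeline call.
# `sd_pipe` is assumed to be a loaded pipeline accepting the same `sld_*` arguments
# the assertions exercise.
def run_sld_comparison(sd_pipe, prompt, seed=0):
    import torch

    images = {}
    for label, sld_scale in [("off", 0), ("strong", 2000)]:
        generator = torch.manual_seed(seed)  # re-seed so both runs start from the same latents
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7,
            num_inference_steps=50,
            output_type="np",
            sld_guidance_scale=sld_scale,  # 0 disables the safety guidance entirely
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        images[label] = output.images
    return images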
| 241 | 1 |
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
version_str: str = None
description: Optional[str] = None
major: Optional[str] = None
minor: Optional[str] = None
patch: Optional[str] = None
def __post_init__( self : Optional[Any] ):
"""simple docstring"""
self.major , self.minor , self.patch = _str_to_version_tuple(self.version_str )
def __repr__( self : List[Any] ):
"""simple docstring"""
return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
def tuple( self : str ):
"""simple docstring"""
return self.major, self.minor, self.patch
def _validate_operand( self : int , UpperCAmelCase_ : Any ):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , str ):
return SCREAMING_SNAKE_CASE__(UpperCAmelCase_ )
elif isinstance(UpperCAmelCase_ , SCREAMING_SNAKE_CASE__ ):
return UpperCAmelCase_
raise TypeError(f"{UpperCAmelCase_} (type {type(UpperCAmelCase_ )}) cannot be compared to version." )
def __eq__( self : List[str] , UpperCAmelCase_ : str ):
"""simple docstring"""
try:
other = self._validate_operand(UpperCAmelCase_ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : int , UpperCAmelCase_ : str ):
"""simple docstring"""
other = self._validate_operand(UpperCAmelCase_ )
return self.tuple < other.tuple
def __hash__( self : Optional[int] ):
"""simple docstring"""
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def from_dict( cls : List[Any] , UpperCAmelCase_ : str ):
"""simple docstring"""
field_names = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in UpperCAmelCase_.items() if k in field_names} )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
return self.version_str
def _str_to_version_tuple( _UpperCAmelCase ):
res = _VERSION_REG.match(_UpperCAmelCase )
if not res:
raise ValueError(F"Invalid version '{_UpperCAmelCase}'. Format should be x.y.z with {{x,y,z}} being digits." )
return tuple(int(v ) for v in [res.group("major" ), res.group("minor" ), res.group("patch" )] )
def _version_tuple_to_str( _UpperCAmelCase ):
return ".".join(str(v ) for v in _UpperCAmelCase )
| 37 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ : Any = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( snake_case__ ,unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = PegasusTokenizer
SCREAMING_SNAKE_CASE = PegasusTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = PegasusTokenizer(lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained("google/pegasus-large" )
def lowerCamelCase_ ( self : List[Any] , **UpperCAmelCase_ : List[str] ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowerCamelCase_ ( self : str , UpperCAmelCase_ : int ):
"""simple docstring"""
return ("This is a test", "This is a test")
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : List[str] = "</s>"
__UpperCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "</s>" )
self.assertEqual(vocab_keys[-1] , "v" )
self.assertEqual(len(vocab_keys ) , 1_103 )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_103 )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
raw_input_str = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
self.assertListEqual(py_ids , rust_ids )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
tokenizer = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
desired_result = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
self.assertListEqual(desired_result , ids )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
tokenizer = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96_103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_024
__UpperCAmelCase : Tuple = "To ensure a smooth flow of bank resolutions."
__UpperCAmelCase : str = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
__UpperCAmelCase : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = ["This is going to be way too long." * 150, "short example"]
__UpperCAmelCase : Optional[int] = ["not super long but more than 5 tokens", "tiny"]
__UpperCAmelCase : str = self._large_tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
__UpperCAmelCase : Union[str, Any] = self._large_tokenizer(
text_target=UpperCAmelCase_ , max_length=5 , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 1_024)
assert batch.attention_mask.shape == (2, 1_024)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCAmelCase_ ) == 2 # input_ids, attention_mask.
@slow
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
# fmt: off
__UpperCAmelCase : Tuple = {"input_ids": [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( snake_case__ ,unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = PegasusTokenizer
SCREAMING_SNAKE_CASE = PegasusTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase : List[str] = PegasusTokenizer(UpperCAmelCase_ , offset=0 , mask_token_sent=UpperCAmelCase_ , mask_token="[MASK]" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )
def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase_ : int ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowerCamelCase_ ( self : str , UpperCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
return ("This is a test", "This is a test")
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
raw_input_str = (
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
self.assertListEqual(py_ids , rust_ids )
@require_torch
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
__UpperCAmelCase : Any = ["This is going to be way too long." * 1_000, "short example"]
__UpperCAmelCase : List[Any] = ["not super long but more than 5 tokens", "tiny"]
__UpperCAmelCase : int = self._large_tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
__UpperCAmelCase : List[Any] = self._large_tokenizer(
text_target=UpperCAmelCase_ , max_length=5 , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 4_096)
assert batch.attention_mask.shape == (2, 4_096)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCAmelCase_ ) == 2 # input_ids, attention_mask.
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
test_str = (
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
token_ids = self._large_tokenizer(test_str ).input_ids
self.assertListEqual(
token_ids , [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1] , )
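# Illustrative sketch of the id arithmetic the assertions above rely on: PEGASUS
# reserves the first `offset` ids for pad/eos/mask-style specials and shifts every
# underlying SentencePiece id upward. The helper name is hypothetical.
def sp_id_to_pegasus_id(sp_id, offset=103):
    # e.g. the sentencepiece unk at position 2 becomes offset + 2 == 105,
    # matching `tokenizer.unk_token_id == tokenizer.offset + 2` above
    return sp_id + offset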
| 37 | 1 |
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays( numsa : list[float] , numsb : list[float] ):
all_numbers = sorted(numsa + numsb )
div, mod = divmod(len(all_numbers ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
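# Two quick worked checks for the function above (illustrative values):
# odd merged length -> middle element; even -> mean of the two middle elements.
assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5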
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Optional[int] =[float(x) for x in input("Enter the elements of first array: ").split()]
__lowerCAmelCase : List[str] =[float(x) for x in input("Enter the elements of second array: ").split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 237 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase :
name : str = None
pip_package : str = None
@staticmethod
def is_available():
raise NotImplementedError
def run( self , trainer , n_trials , direction , **kwargs ):
raise NotImplementedError
def default_hp_space( self , trial ):
raise NotImplementedError
def ensure_available( self ):
if not self.is_available():
raise RuntimeError(
F"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." )
@classmethod
def pip_install( cls ):
return F"`pip install {cls.pip_package or cls.name}`"
class OptunaBackend ( HyperParamSearchBackendBase ):
name = """optuna"""
@staticmethod
def is_available():
return is_optuna_available()
def run( self , trainer , n_trials , direction , **kwargs ):
return run_hp_search_optuna(trainer , n_trials , direction , **kwargs )
def default_hp_space( self , trial ):
return default_hp_space_optuna(trial )
class RayTuneBackend ( HyperParamSearchBackendBase ):
name = """ray"""
pip_package = """'ray[tune]'"""
@staticmethod
def is_available():
return is_ray_available()
def run( self , trainer , n_trials , direction , **kwargs ):
return run_hp_search_ray(trainer , n_trials , direction , **kwargs )
def default_hp_space( self , trial ):
return default_hp_space_ray(trial )
class SigOptBackend ( HyperParamSearchBackendBase ):
name = """sigopt"""
@staticmethod
def is_available():
return is_sigopt_available()
def run( self , trainer , n_trials , direction , **kwargs ):
return run_hp_search_sigopt(trainer , n_trials , direction , **kwargs )
def default_hp_space( self , trial ):
return default_hp_space_sigopt(trial )
class WandbBackend ( HyperParamSearchBackendBase ):
name = """wandb"""
@staticmethod
def is_available():
return is_wandb_available()
def run( self , trainer , n_trials , direction , **kwargs ):
return run_hp_search_wandb(trainer , n_trials , direction , **kwargs )
def default_hp_space( self , trial ):
return default_hp_space_wandb(trial )
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def UpperCamelCase ( ):
available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(available_backends ) > 0:
name = available_backends[0].name
if len(available_backends ) > 1:
logger.info(
F"{len(available_backends )} hyperparameter search backends available. Using {name} as the default." )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
F" - To install {backend.name} run {backend.pip_install()}"
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
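# A hedged usage sketch: resolving a backend through the registry above by name.
# The helper name is an assumption; the registry and `ensure_available` are the
# objects defined just above.
def get_backend(backend_name):
    backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]
    backend.ensure_available()  # raises with a `pip install ...` hint when missing
    return backend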
| 237 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC )
class lowerCAmelCase_ ( PretrainedConfig ):
UpperCAmelCase__ : int = "rag"
UpperCAmelCase__ : List[Any] = True
def __init__( self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr", dataset_split="train", index_name="compressed", index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True, forced_eos_token_id=None, **kwargs, ):
super().__init__(
bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs, )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
question_encoder_config = kwargs.pop('question_encoder' )
question_encoder_model_type = question_encoder_config.pop('model_type' )
decoder_config = kwargs.pop('generator' )
decoder_model_type = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config )
self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config )
self.reduce_loss = reduce_loss
self.label_smoothing = label_smoothing
self.exclude_bos_score = exclude_bos_score
self.do_marginalize = do_marginalize
self.title_sep = title_sep
self.doc_sep = doc_sep
self.n_docs = n_docs
self.max_combined_length = max_combined_length
self.dataset = dataset
self.dataset_split = dataset_split
self.index_name = index_name
self.retrieval_vector_size = retrieval_vector_size
self.retrieval_batch_size = retrieval_batch_size
self.passages_path = passages_path
self.index_path = index_path
self.use_dummy_dataset = use_dummy_dataset
self.output_retrieved = output_retrieved
self.do_deduplication = do_deduplication
self.use_cache = use_cache
if self.forced_eos_token_id is None:
self.forced_eos_token_id = getattr(self.generator, 'forced_eos_token_id', None )
@classmethod
def from_question_encoder_generator_configs( cls, question_encoder_config, generator_config, **kwargs ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs )
def to_dict( self ):
output = copy.deepcopy(self.__dict__ )
output['question_encoder'] = self.question_encoder.to_dict()
output['generator'] = self.generator.to_dict()
output['model_type'] = self.__class__.model_type
return output
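# A hedged usage sketch for the config class above, wrapped in a function so nothing
# downloads at import time. The sub-config model ids are illustrative assumptions;
# the class name `lowerCAmelCase_` follows this file's (obfuscated) naming.
def build_rag_config_example():
    from transformers import AutoConfig

    question_encoder_config = AutoConfig.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
    generator_config = AutoConfig.from_pretrained('facebook/bart-large' )
    return lowerCAmelCase_.from_question_encoder_generator_configs(question_encoder_config , generator_config , n_docs=5 )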
| 103 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = JukeboxTokenizer
metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
def snake_case_ ( self ) -> Optional[Any]:
import torch
tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' )
tokens = tokenizer(**self.metas )['input_ids']
# fmt: off
EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2] ) )
@require_torch
def snake_case_ ( self ) -> Optional[Any]:
import torch
tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' )
tokens = tokenizer(**self.metas )['input_ids']
# fmt: off
EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2] ) )
| 103 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_vit_msn"""] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
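# A minimal standalone sketch of the lazy-import pattern used above: attribute access
# on the module triggers the real submodule import. This toy mirrors the idea and is
# not the actual transformers `_LazyModule` implementation.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # exported attribute -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        return getattr(module, attr)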
| 330 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : Any = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowerCamelCase (self , model , tokenizer , processor ):
'''simple docstring'''
example_video_filepath = hf_hub_download(
repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
video_classifier = VideoClassificationPipeline(model=model , image_processor=processor , top_k=2 )
examples = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def lowerCamelCase (self , video_classifier , examples ):
'''simple docstring'''
for example in examples:
outputs = video_classifier(example )
self.assertEqual(
outputs , [
{'''score''': ANY(float ), '''label''': ANY(str )},
{'''score''': ANY(float ), '''label''': ANY(str )},
] , )
@require_torch
def lowerCamelCase (self ) -> str:
'''simple docstring'''
small_model = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
small_feature_extractor = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
video_classifier = pipeline(
'''video-classification''' , model=small_model , feature_extractor=small_feature_extractor , frame_sampling_rate=4 )
video_file_path = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
outputs = video_classifier(video_file_path , top_k=2 )
self.assertEqual(
nested_simplify(outputs , decimals=4 ) , [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}] , )
outputs = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(outputs , decimals=4 ) , [
[{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}],
[{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}],
] , )
@require_tf
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
pass
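# A hedged usage sketch mirroring the test above; `pipeline` is the real high-level
# transformers factory, and the tiny checkpoint id is the one used in the test.
def classify_video(video_path):
    from transformers import pipeline

    classifier = pipeline('''video-classification''' , model='''hf-internal-testing/tiny-random-VideoMAEForVideoClassification''' )
    return classifier(video_path , top_k=2 )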
| 279 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
'''simple docstring'''
rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
prefix = ''''''
else:
prefix = '''deit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
: config.hidden_size, :
]
state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
-config.hidden_size :, :
]
state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def rename_key( dct , old , new ):
'''simple docstring'''
val = dct.pop(old )
dct[new] = val
def prepare_img( ):
'''simple docstring'''
url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_deit_checkpoint( deit_name , pytorch_dump_folder_path ):
'''simple docstring'''
UpperCamelCase__ :List[str] = DeiTConfig()
# all deit models have fine-tuned heads
UpperCamelCase__ :Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
UpperCamelCase__ :List[Any] = 1000
UpperCamelCase__ :str = '''huggingface/label-files'''
UpperCamelCase__ :List[Any] = '''imagenet-1k-id2label.json'''
UpperCamelCase__ :Tuple = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type='''dataset''' ) , '''r''' ) )
UpperCamelCase__ :Optional[Any] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
UpperCamelCase__ :Optional[int] = idalabel
UpperCamelCase__ :List[str] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ :Union[str, Any] = int(deit_name[-6:-4] )
UpperCamelCase__ :Optional[int] = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('''tiny''' ):
UpperCamelCase__ :Tuple = 192
UpperCamelCase__ :Any = 768
UpperCamelCase__ :Optional[Any] = 12
UpperCamelCase__ :Tuple = 3
elif deit_name[9:].startswith('''small''' ):
UpperCamelCase__ :List[Any] = 384
UpperCamelCase__ :List[str] = 1536
UpperCamelCase__ :str = 12
UpperCamelCase__ :Optional[int] = 6
if deit_name[9:].startswith('''base''' ):
pass
elif deit_name[4:].startswith('''large''' ):
UpperCamelCase__ :Union[str, Any] = 1024
UpperCamelCase__ :List[str] = 4096
UpperCamelCase__ :Optional[Any] = 24
UpperCamelCase__ :Dict = 16
# load original model from timm
UpperCamelCase__ :Dict = timm.create_model(lowerCamelCase__ , pretrained=lowerCamelCase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCamelCase__ :List[str] = timm_model.state_dict()
UpperCamelCase__ :Any = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# load HuggingFace model
UpperCamelCase__ :int = DeiTForImageClassificationWithTeacher(lowerCamelCase__ ).eval()
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image, prepared by DeiTImageProcessor
UpperCamelCase__ :Any = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
UpperCamelCase__ :Optional[int] = DeiTImageProcessor(size=lowerCamelCase__ , crop_size=config.image_size )
UpperCamelCase__ :Union[str, Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCamelCase__ :Optional[int] = encoding['''pixel_values''']
UpperCamelCase__ :str = model(lowerCamelCase__ )
UpperCamelCase__ :List[str] = timm_model(lowerCamelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCamelCase__ , outputs.logits , atol=1e-3 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase__ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__snake_case = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
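# A small worked sketch of the fused-qkv split performed by `read_in_q_k_v` above:
# timm stores query/key/value as one (3 * hidden, hidden) matrix, sliced into thirds.
def split_qkv_demo(hidden_size=4):
    fused = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    q = fused[:hidden_size, :]
    k = fused[hidden_size : hidden_size * 2, :]
    v = fused[-hidden_size:, :]
    assert torch.equal(torch.cat([q, k, v]), fused)
    return q, k, v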
| 350 |
'''simple docstring'''
from math import ceil
def assert_device_map( device_map , num_blocks ):
'''simple docstring'''
blocks = list(range(0 , num_blocks ) )
device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
duplicate_blocks = []
for i in device_map_blocks:
if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(i )
# Missing blocks
missing_blocks = [i for i in blocks if i not in device_map_blocks]
extra_blocks = [i for i in device_map_blocks if i not in blocks]
if len(duplicate_blocks ) != 0:
raise ValueError(
'''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
''' These attention blocks were specified more than once: ''' + str(duplicate_blocks ) )
if len(missing_blocks ) != 0:
raise ValueError(
'''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
'''blocks to a device on the device_map: ''' + str(missing_blocks ) )
if len(extra_blocks ) != 0:
raise ValueError(
'''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
+ str(extra_blocks ) )
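# A small worked sketch tying the validator above to `get_device_map` defined just
# below (hypothetical layer/device counts): 4 layers over 2 devices chunk into
# ceil(4 / 2) = 2 layers per device.
def _device_map_demo():
    device_map = get_device_map(4, [0, 1])
    assert device_map == {0: [0, 1], 1: [2, 3]}
    assert_device_map(device_map, 4)  # passes: no duplicate, missing, or extra blocks
    return device_map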
def get_device_map( n_layers , devices ):
'''simple docstring'''
layers = list(range(n_layers ) )
n_blocks = int(ceil(n_layers / len(devices ) ) )
layers_list = [layers[i : i + n_blocks] for i in range(0 , n_layers , n_blocks )]
return dict(zip(devices , layers_list ) )
| 219 | 0 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = RobertaTokenizer
_lowerCamelCase = RobertaTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = {"""cls_token""": """<s>"""}
def UpperCamelCase__( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
self.special_tokens_map = {'''unk_token''': '''<unk>'''}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(vocab_tokens ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(merges ) )
def UpperCamelCase__( self , **__lowerCamelCase ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def UpperCamelCase__( self , **__lowerCamelCase ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
input_text = '''lower newer'''
output_text = '''lower newer'''
return input_text, output_text
def UpperCamelCase__( self ):
'''simple docstring'''
tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
text = '''lower newer'''
bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
self.assertListEqual(tokens , bpe_tokens )
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__lowerCamelCase ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__lowerCamelCase ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Union[str, Any] = self.tokenizer_class.from_pretrained('''roberta-base''' )
__A : Any = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCamelCase )
__A : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCamelCase )
__A : str = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
__A : List[Any] = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
__A : Dict = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
__A : Tuple = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[str] = self.get_tokenizer()
__A : Optional[int] = '''Encode this sequence.'''
__A : List[Any] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__A : Tuple = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
__A : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
__A : Optional[int] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
__A : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__A : List[str] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__A : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
__A : Dict = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
__A : Any = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
__A : Optional[int] = '''Encode <mask> sequence'''
__A : int = '''Encode <mask>sequence'''
__A : Tuple = tokenizer.encode(__lowerCamelCase )
__A : str = encoded.index(__lowerCamelCase )
__A : Any = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
__A : Union[str, Any] = tokenizer.encode(__lowerCamelCase )
__A : Union[str, Any] = encoded.index(__lowerCamelCase )
__A : Any = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
def UpperCamelCase__( self ):
'''simple docstring'''
pass
def UpperCamelCase__( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__A : Optional[int] = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
__A : List[Any] = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
__A : int = '''A, <mask> AllenNLP sentence.'''
__A : int = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
__A : Tuple = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__A : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__A : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__lowerCamelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def UpperCamelCase__( self ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__A : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
__A : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__A : Dict = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __lowerCamelCase )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __lowerCamelCase )
self.assertEqual(post_processor_state['''trim_offsets'''] , __lowerCamelCase )
def UpperCamelCase__( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__A : Optional[Any] = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__A : Tuple = F"""{text_of_1_token} {text_of_1_token}"""
__A : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
__A : Optional[int] = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
__A : List[Any] = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
__A : Dict = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
__A : int = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
__A : List[Any] = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
__A : Any = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
__A : List[str] = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
__A : Tuple = F""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__A : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
__A : Dict = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
__A : Dict = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
__A : Tuple = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
__A : str = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
__A : Optional[int] = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
| 179 |
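# Quick sanity check of the offset convention the tests above exercise: with
# trim_offsets=True the second token's span excludes the separating space,
# so slicing the text by the offsets recovers the bare token.
text = "hello hello"
first_span = (0, len("hello"))               # (0, 5)
second_span = (len("hello") + 1, len(text))  # (6, 11)
assert text[first_span[0] : first_span[1]] == "hello"
assert text[second_span[0] : second_span[1]] == "hello"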
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Union[str, Any] = 1
__A : Any = 3
__A : List[str] = (32, 32)
__A : List[str] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCamelCase )
return image
@property
def UpperCamelCase__( self ):
'''simple docstring'''
torch.manual_seed(0 )
        __A : List[Any] = UNet2DConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__lowerCamelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def UpperCamelCase__( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : Any = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCamelCase__( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
return CLIPTextModel(__lowerCamelCase )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__A : int = self.dummy_cond_unet_upscale
__A : Union[str, Any] = DDPMScheduler()
__A : Dict = DDIMScheduler(prediction_type='''v_prediction''' )
__A : int = self.dummy_vae
__A : int = self.dummy_text_encoder
__A : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__A : Tuple = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __A : Any = Image.fromarray(np.uint8(__lowerCamelCase ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__A : Dict = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
__A : str = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : List[str] = '''A painting of a squirrel eating a burger'''
__A : Any = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__A : List[str] = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__A : Union[str, Any] = output.images
__A : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__A : str = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=__lowerCamelCase , )[0]
__A : Tuple = image[0, -3:, -3:, -1]
__A : int = image_from_tuple[0, -3:, -3:, -1]
__A : Dict = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
__A : str = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__A : Dict = self.dummy_cond_unet_upscale
__A : List[str] = DDPMScheduler()
__A : str = DDIMScheduler(prediction_type='''v_prediction''' )
__A : Optional[int] = self.dummy_vae
__A : Optional[Any] = self.dummy_text_encoder
__A : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__A : List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __A : int = Image.fromarray(np.uint8(__lowerCamelCase ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__A : Any = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
__A : Any = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : Any = '''A painting of a squirrel eating a burger'''
__A : Any = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__A : Union[str, Any] = output.images
assert image.shape[0] == 2
__A : Optional[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__A : Any = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__A : Union[str, Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = self.dummy_cond_unet_upscale
__A : int = DDPMScheduler()
__A : List[Any] = DDIMScheduler(prediction_type='''v_prediction''' )
__A : Optional[Any] = self.dummy_vae
__A : List[str] = self.dummy_text_encoder
__A : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__A : Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __A : int = Image.fromarray(np.uint8(__lowerCamelCase ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
__A : Union[str, Any] = unet.half()
__A : Optional[int] = text_encoder.half()
# make sure here that pndm scheduler skips prk
__A : Optional[int] = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
__A : Union[str, Any] = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : Union[str, Any] = '''A painting of a squirrel eating a burger'''
__A : Optional[Any] = torch.manual_seed(0 )
__A : Tuple = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=2 , output_type='''np''' , ).images
__A : str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__A : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
__A : str = '''stabilityai/stable-diffusion-x4-upscaler'''
__A : Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
__A : Union[str, Any] = '''a cat sitting on a park bench'''
__A : Union[str, Any] = torch.manual_seed(0 )
__A : Optional[Any] = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , output_type='''np''' , )
__A : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__A : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
__A : Optional[int] = '''stabilityai/stable-diffusion-x4-upscaler'''
__A : Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(
            __lowerCamelCase , torch_dtype=torch.float16 , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
__A : Dict = '''a cat sitting on a park bench'''
__A : Any = torch.manual_seed(0 )
__A : Optional[int] = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , output_type='''np''' , )
__A : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def UpperCamelCase__( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__A : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__A : List[str] = '''stabilityai/stable-diffusion-x4-upscaler'''
__A : Dict = StableDiffusionUpscalePipeline.from_pretrained(
            __lowerCamelCase , torch_dtype=torch.float16 , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__A : Tuple = '''a cat sitting on a park bench'''
__A : Tuple = torch.manual_seed(0 )
__A : List[str] = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , output_type='''np''' , )
__A : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 179 | 1 |
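# Condensed usage sketch of the upscaler exercised above; output resolution
# is 4x the low-res input (model id taken from the tests; a CUDA GPU and a
# model download are assumed).
import torch
from PIL import Image
from diffusers import StableDiffusionUpscalePipeline

pipe = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
).to("cuda")
low_res = Image.new("RGB", (128, 128))  # stand-in for a real low-res photo
up = pipe(prompt="a cat sitting on a park bench", image=low_res).images[0]
print(up.size)  # (512, 512)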
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __UpperCAmelCase ( ctypes.Structure ):
'''simple docstring'''
__lowerCAmelCase = [('''size''', ctypes.c_int), ('''visible''', ctypes.c_byte)]
def __a ( ) ->Union[str, Any]:
"""simple docstring"""
if os.name == "nt":
A = CursorInfo()
        A = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
        A = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
elif os.name == "posix":
sys.stdout.write("""\033[?25l""" )
sys.stdout.flush()
def __a ( ) ->int:
"""simple docstring"""
if os.name == "nt":
A = CursorInfo()
        A = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
        A = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(UpperCAmelCase , ctypes.byref(UpperCAmelCase ) )
elif os.name == "posix":
sys.stdout.write("""\033[?25h""" )
sys.stdout.flush()
@contextmanager
def __a ( ) ->Any:
"""simple docstring"""
try:
hide_cursor()
yield
finally:
show_cursor()
| 337 |
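# Self-contained usage sketch of the pattern above, with readable names (the
# original file defines hide/show helpers plus a @contextmanager pairing them
# via try/finally); the ANSI escape codes match the POSIX branch above.
import sys
from contextlib import contextmanager

@contextmanager
def hidden_cursor():
    sys.stdout.write("\033[?25l")  # ANSI: hide cursor
    sys.stdout.flush()
    try:
        yield
    finally:
        sys.stdout.write("\033[?25h")  # ANSI: show cursor again
        sys.stdout.flush()

with hidden_cursor():
    pass  # long-running work renders here without a blinking cursor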
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_lowerCamelCase : Dict = 'src/diffusers'
_lowerCamelCase : Dict = '.'
# This is to make sure the diffusers module imported is the one in the repo.
_lowerCamelCase : List[str] = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
_lowerCamelCase : Tuple = spec.loader.load_module()
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
return line.startswith(UpperCAmelCase ) or len(UpperCAmelCase ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , UpperCAmelCase ) is not None
def __a ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
A = object_name.split(""".""" )
A = 0
# First let's find the module where our object lives.
A = parts[i]
while i < len(UpperCAmelCase ) and not os.path.isfile(os.path.join(UpperCAmelCase , f"""{module}.py""" ) ):
i += 1
if i < len(UpperCAmelCase ):
A = os.path.join(UpperCAmelCase , parts[i] )
if i >= len(UpperCAmelCase ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(UpperCAmelCase , f"""{module}.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A = f.readlines()
# Now let's find the class / func in the code!
A = """"""
A = 0
for name in parts[i + 1 :]:
while (
line_index < len(UpperCAmelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(UpperCAmelCase ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
A = line_index
while line_index < len(UpperCAmelCase ) and _should_continue(lines[line_index] , UpperCAmelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A = lines[start_index:line_index]
return "".join(UpperCAmelCase )
_lowerCamelCase : str = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_lowerCamelCase : Any = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
_lowerCamelCase : str = re.compile(R'<FILL\s+[^>]*>')
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
A = code.split("""\n""" )
A = 0
while idx < len(UpperCAmelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(UpperCAmelCase ):
return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
return ""
def __a ( UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
A = len(get_indent(UpperCAmelCase ) ) > 0
if has_indent:
A = f"""class Bla:\n{code}"""
    A = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=UpperCAmelCase )
A = black.format_str(UpperCAmelCase , mode=UpperCAmelCase )
A , A = style_docstrings_in_code(UpperCAmelCase )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def __a ( UpperCAmelCase , UpperCAmelCase=False ) ->List[str]:
"""simple docstring"""
with open(UpperCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A = f.readlines()
A = []
A = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(UpperCAmelCase ):
A = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
A , A , A = search.groups()
A = find_code_in_diffusers(UpperCAmelCase )
A = get_indent(UpperCAmelCase )
A = line_index + 1 if indent == theoretical_indent else line_index + 2
A = theoretical_indent
A = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see an End copy comment.
A = True
while line_index < len(UpperCAmelCase ) and should_continue:
line_index += 1
if line_index >= len(UpperCAmelCase ):
break
A = lines[line_index]
A = _should_continue(UpperCAmelCase , UpperCAmelCase ) and re.search(f"""^{indent}# End copy""" , UpperCAmelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A = lines[start_index:line_index]
A = """""".join(UpperCAmelCase )
# Remove any nested `Copied from` comments to avoid circular copies
A = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(UpperCAmelCase ) is None]
A = """\n""".join(UpperCAmelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(UpperCAmelCase ) > 0:
A = replace_pattern.replace("""with""" , """""" ).split(""",""" )
A = [_re_replace_pattern.search(UpperCAmelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
A , A , A = pattern.groups()
A = re.sub(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if option.strip() == "all-casing":
A = re.sub(obja.lower() , obja.lower() , UpperCAmelCase )
A = re.sub(obja.upper() , obja.upper() , UpperCAmelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
A = blackify(lines[start_index - 1] + theoretical_code )
A = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
A = lines[:start_index] + [theoretical_code] + lines[line_index:]
A = start_index + 1
if overwrite and len(UpperCAmelCase ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCAmelCase )
return diffs
def __a ( UpperCAmelCase = False ) ->int:
"""simple docstring"""
A = glob.glob(os.path.join(UpperCAmelCase , """**/*.py""" ) , recursive=UpperCAmelCase )
A = []
for filename in all_files:
A = is_copy_consistent(UpperCAmelCase , UpperCAmelCase )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(UpperCAmelCase ) > 0:
A = """\n""".join(UpperCAmelCase )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_lowerCamelCase : Any = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 337 | 1 |
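# Illustration of the copy-marker regex defined above: it captures the
# indentation, the fully qualified object name, and any trailing
# replacement pattern.
import re

_re_copy = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
m = _re_copy.search("    # Copied from diffusers.models.attention.BasicTransformerBlock with Basic->My")
indent, object_name, replace_pattern = m.groups()
# indent == "    ", object_name == "models.attention.BasicTransformerBlock"
# replace_pattern == "with Basic->My"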
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
a_ : Any = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
a_ : List[Any] = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
a_ : Optional[Any] = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def _A (lowerCAmelCase__ :int , lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
return float((preds == labels).mean() )
def _A (lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str="binary" ) -> List[str]:
'''simple docstring'''
_a = simple_accuracy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    _a = float(f1_score(y_true=__SCREAMING_SNAKE_CASE , y_pred=__SCREAMING_SNAKE_CASE , average=__SCREAMING_SNAKE_CASE ) )
return {
"accuracy": acc,
"f1": fa,
}
def _A (lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[int] ) -> List[str]:
'''simple docstring'''
_a = {}
for id_pred, label in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_a = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
_a = id_pred['''prediction''']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
_a = [(pred, label)]
    _a , _a = [], []
    for question, preds_labels in question_map.items():
        _a , _a = zip(*__SCREAMING_SNAKE_CASE )
        _a = f1_score(y_true=__SCREAMING_SNAKE_CASE , y_pred=__SCREAMING_SNAKE_CASE , average='macro' )
fas.append(__SCREAMING_SNAKE_CASE )
_a = int(sum(pred == label for pred, label in preds_labels ) == len(__SCREAMING_SNAKE_CASE ) )
ems.append(__SCREAMING_SNAKE_CASE )
_a = float(sum(__SCREAMING_SNAKE_CASE ) / len(__SCREAMING_SNAKE_CASE ) )
_a = sum(__SCREAMING_SNAKE_CASE ) / len(__SCREAMING_SNAKE_CASE )
    _a = float(f1_score(y_true=__SCREAMING_SNAKE_CASE , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def __UpperCAmelCase ( self ) -> List[str]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
def __UpperCAmelCase ( self ) -> str:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> str:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
elif self.config_name == "cb":
return acc_and_fa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , fa_avg='macro' )
elif self.config_name == "record":
_a = [
{
'''qas''': [
{'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
for ref in references
]
}
]
_a = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
return evaluate_record(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )[0]
elif self.config_name == "multirc":
return evaluate_multirc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 168 |
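# Hedged numeric check of the accuracy/F1 helpers above (numpy arrays
# assumed, matching how the metric feeds predictions in):
import numpy as np
from sklearn.metrics import f1_score

preds = np.array([0, 1, 1, 0])
labels = np.array([0, 1, 0, 0])
assert float((preds == labels).mean()) == 0.75                       # simple accuracy
assert abs(float(f1_score(y_true=labels, y_pred=preds)) - 2 / 3) < 1e-9  # binary F1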
'''simple docstring'''
import torch
from transformers import AutoModel
class lowerCAmelCase__ ( torch.nn.Module ):
def __init__( self , __SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super(__SCREAMING_SNAKE_CASE , self ).__init__()
lowercase_ : Tuple = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = torch.nn.CosineSimilarity(3 , 1E-0_8 )
lowercase_ : Optional[Any] = torch.nn.Softmax(dim=1 )
def _snake_case ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[Any] = W_supports['''sizes'''].tolist()
lowercase_ : Dict = W_supports['''start_token_id'''].item()
lowercase_ : List[Any] = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
lowercase_ : List[str] = self.BERT(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = self.BERT(**__SCREAMING_SNAKE_CASE )
lowercase_ : str = None
lowercase_ : Dict = None
lowercase_ : Tuple = W_supports['''input_ids'''] == start_token_id
lowercase_ : Any = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
lowercase_ : List[str] = 0
else:
lowercase_ : List[Any] = support_sizes[i - 1]
lowercase_ : str = S[s : s + size][start_token_masks[s : s + size]]
lowercase_ : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
lowercase_ : List[str] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
lowercase_ : List[str] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
lowercase_ : Tuple = torch.vstack((p_starts, p_start) )
lowercase_ : Optional[Any] = torch.vstack((p_ends, p_end) )
else:
lowercase_ : str = p_start
lowercase_ : int = p_end
return p_starts, p_ends
| 93 | 0 |
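# Shape sketch of the scoring step used in the model above: cosine
# similarity over the hidden dimension, then a temperature-scaled softmax
# (shapes are illustrative).
import torch

cos = torch.nn.CosineSimilarity(dim=3, eps=1e-8)
softmax = torch.nn.Softmax(dim=1)
q = torch.randn(2, 1, 5, 8)   # (batch, 1, query_len, hidden)
s = torch.randn(2, 7, 1, 8)   # (batch, support_len, 1, hidden)
p = softmax(1.0 * cos(q, s))  # broadcasts to (2, 7, 5); softmax over supports
print(p.shape)                # torch.Size([2, 7, 5])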
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A__ :
@staticmethod
def __lowerCamelCase ( *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
pass
@is_pipeline_test
@require_vision
@require_torch
class A__ ( unittest.TestCase):
A_ : Any = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
__lowerCAmelCase : List[str] = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = object_detector(examples[0] , threshold=0.0 )
__lowerCAmelCase : List[str] = len(_SCREAMING_SNAKE_CASE )
self.assertGreater(_SCREAMING_SNAKE_CASE , 0 )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{
'score': ANY(_SCREAMING_SNAKE_CASE ),
'label': ANY(_SCREAMING_SNAKE_CASE ),
'box': {'xmin': ANY(_SCREAMING_SNAKE_CASE ), 'ymin': ANY(_SCREAMING_SNAKE_CASE ), 'xmax': ANY(_SCREAMING_SNAKE_CASE ), 'ymax': ANY(_SCREAMING_SNAKE_CASE )},
}
for i in range(_SCREAMING_SNAKE_CASE )
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def __lowerCamelCase ( self ):
pass
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
__lowerCAmelCase : Tuple = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
] , )
__lowerCAmelCase : List[Any] = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
]
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = pipeline('zero-shot-object-detection' )
__lowerCAmelCase : List[Any] = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
] , )
__lowerCAmelCase : Union[str, Any] = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def __lowerCamelCase ( self ):
pass
@require_torch
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = 0.2
__lowerCAmelCase : str = pipeline('zero-shot-object-detection' )
__lowerCAmelCase : Optional[Any] = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=_SCREAMING_SNAKE_CASE , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = 2
__lowerCAmelCase : Union[str, Any] = pipeline('zero-shot-object-detection' )
__lowerCAmelCase : Union[str, Any] = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=_SCREAMING_SNAKE_CASE , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
] , ) | 182 |
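# Minimal invocation sketch of the pipeline exercised above: candidate
# labels are free-form text, and `threshold`/`top_k` filter detections.
from transformers import pipeline

detector = pipeline("zero-shot-object-detection")
detections = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote"],
    threshold=0.3,
)
# each item: {"score": float, "label": str, "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}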
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def __lowerCAmelCase (_UpperCamelCase ):
@wraps(_UpperCamelCase )
def _inner_fn(*_UpperCamelCase , **_UpperCamelCase ):
warnings.warn(
(F"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.") , _UpperCamelCase , )
return fn(*_UpperCamelCase , **_UpperCamelCase )
return _inner_fn | 182 | 1 |
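# Self-contained sketch of the decorator pattern above, with readable names
# (the original identifiers are obfuscated):
import warnings
from functools import wraps

def experimental(fn):
    @wraps(fn)
    def _inner(*args, **kwargs):
        warnings.warn(f"'{fn.__name__}' is experimental.", UserWarning)
        return fn(*args, **kwargs)
    return _inner

@experimental
def new_feature(x):
    return x * 2

assert new_feature(3) == 6  # also emits a UserWarning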
from __future__ import annotations
from collections import deque
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : int , a : list[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : list[dict] = []
self.adlist.append(
{"value": "", "next_states": [], "fail_state": 0, "output": []} )
for keyword in keywords:
self.add_keyword(a )
self.set_fail_transitions()
def __UpperCamelCase ( self : Tuple , a : int , a : str ) -> int | None:
"""simple docstring"""
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def __UpperCamelCase ( self : List[str] , a : str ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = 0
for character in keyword:
SCREAMING_SNAKE_CASE : Optional[int] = self.find_next_state(a , a )
if next_state is None:
self.adlist.append(
{
"value": character,
"next_states": [],
"fail_state": 0,
"output": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
SCREAMING_SNAKE_CASE : int = len(self.adlist ) - 1
else:
SCREAMING_SNAKE_CASE : Dict = next_state
self.adlist[current_state]["output"].append(a )
def __UpperCamelCase ( self : List[Any] ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(a )
SCREAMING_SNAKE_CASE : List[Any] = 0
while q:
SCREAMING_SNAKE_CASE : Any = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(a )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.adlist[r]["fail_state"]
while (
self.find_next_state(a , self.adlist[child]["value"] ) is None
and state != 0
):
SCREAMING_SNAKE_CASE : str = self.adlist[state]["fail_state"]
SCREAMING_SNAKE_CASE : Dict = self.find_next_state(
a , self.adlist[child]["value"] )
if self.adlist[child]["fail_state"] is None:
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Dict = (
self.adlist[child]["output"]
+ self.adlist[self.adlist[child]["fail_state"]]["output"]
)
def __UpperCamelCase ( self : Optional[Any] , a : str ) -> dict[str, list[int]]:
"""simple docstring"""
        SCREAMING_SNAKE_CASE : dict = {}  # returns a dict mapping each keyword to the list of its occurrences
SCREAMING_SNAKE_CASE : Optional[Any] = 0
for i in range(len(a ) ):
while (
self.find_next_state(a , string[i] ) is None
and current_state != 0
):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.adlist[current_state]["fail_state"]
SCREAMING_SNAKE_CASE : Optional[Any] = self.find_next_state(a , string[i] )
if next_state is None:
SCREAMING_SNAKE_CASE : Optional[int] = 0
else:
SCREAMING_SNAKE_CASE : Optional[int] = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
SCREAMING_SNAKE_CASE : str = []
result[key].append(i - len(a ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod() | 76 |
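# Cross-check of what the automaton's search should return, via a naive
# scan (same dict-of-start-indices shape the class above builds):
def naive_find(keywords, string):
    result = {}
    for kw in keywords:
        hits = [i for i in range(len(string) - len(kw) + 1) if string[i : i + len(kw)] == kw]
        if hits:
            result[kw] = hits
    return result

print(naive_find(["he", "she", "his", "hers"], "ahishers"))
# {'he': [4], 'she': [3], 'his': [1], 'hers': [4]}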
'''simple docstring'''
__a = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__a = frozenset(["prompt", "negative_prompt"])
__a = frozenset([])
__a = frozenset(["image"])
__a = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["image"])
__a = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__a = frozenset(["prompt", "image", "negative_prompt"])
__a = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__a = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__a = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["image", "mask_image"])
__a = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__a = frozenset(["example_image", "image", "mask_image"])
__a = frozenset(["class_labels"])
__a = frozenset(["class_labels"])
__a = frozenset(["batch_size"])
__a = frozenset([])
__a = frozenset(["batch_size"])
__a = frozenset([])
__a = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__a = frozenset(["prompt", "negative_prompt"])
__a = frozenset(["input_tokens"])
__a = frozenset(["input_tokens"])
| 35 | 0 |
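# These frozensets are typically consumed by pipeline tests to validate call
# signatures, e.g. required arguments must be a subset of the accepted ones
# (a minimal illustration with stand-in names):
TEXT_TO_IMAGE_PARAMS = frozenset(["prompt", "height", "width", "guidance_scale", "negative_prompt"])
REQUIRED = frozenset(["prompt", "negative_prompt"])
assert REQUIRED <= TEXT_TO_IMAGE_PARAMS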
"""simple docstring"""
import sys
import turtle
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
'''simple docstring'''
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
triangle(lowerCamelCase__ , get_mid(lowerCamelCase__ , lowerCamelCase__ ) , get_mid(lowerCamelCase__ , lowerCamelCase__ ) , depth - 1 )
triangle(lowerCamelCase__ , get_mid(lowerCamelCase__ , lowerCamelCase__ ) , get_mid(lowerCamelCase__ , lowerCamelCase__ ) , depth - 1 )
triangle(lowerCamelCase__ , get_mid(lowerCamelCase__ , lowerCamelCase__ ) , get_mid(lowerCamelCase__ , lowerCamelCase__ ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
lowercase__ :Optional[int] = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
lowercase__ :str = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 353 |
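# Quick check of the midpoint helper above (readable names assumed), plus a
# note on the recursion's growth:
def get_mid(p1, p2):
    return ((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2)

assert get_mid((-175, -125), (175, -125)) == (0.0, -125.0)
# a call at depth d ultimately draws 3**d smallest triangles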
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = np.inf
def set_batch_size(lowerCAmelCase__ ) -> None:
nonlocal batch_size
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = min(lowerCAmelCase__ , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = min(lowerCAmelCase__ , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and feature.dtype == "binary":
lowercase = min(lowerCAmelCase__ , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(lowerCAmelCase__ , lowerCAmelCase__ )
return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
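# Hypothetical round-trip using the pair above (upstream these live in
# `datasets.io.parquet`); the file name and dataset contents are made up:
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"text": ["a", "b", "c"]})
#     ParquetDatasetWriter(ds, "data.parquet").write()   # chunked Arrow -> Parquet
#     reloaded = ParquetDatasetReader("data.parquet").read()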
| 97 | 0 |
"""simple docstring"""
cache = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # a string stops being a prize string once we are absent twice in
    # total or late three days in a row
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
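
# Worked example from the Project Euler 191 statement: over a 4-day period,
# 43 of the 3**4 = 81 possible trinary strings earn a prize.
#
#     >>> solution(4)
#     43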
if __name__ == "__main__":
    print(solution())
| 160 |
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None  # pointer to the next node, None for the tail

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
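
# Quick illustration (a sketch, separate from the tests below): reverse()
# relinks each node's `next` pointer in a single O(n) pass and allocates
# no new nodes.
#
#     >>> demo = LinkedList()
#     >>> for value in (1, 2, 3):
#     ...     demo.insert_tail(value)
#     >>> str(demo)
#     '1->2->3'
#     >>> demo.reverse()
#     >>> str(demo)
#     '3->2->1'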
def test_singly_linked_list() -> None:
    """Exercises the basic LinkedList operations."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """Tests that the LinkedList works with heterogeneous objects."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 248 | 0 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Read-only filesystem view over the sibling files of a Hub dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
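# Hypothetical usage sketch: `repo_info` would come from
# `huggingface_hub.HfApi().dataset_info(...)` (network access assumed), and
# paths then resolve against the repo's sibling files cached in dir_cache.
#
#     from huggingface_hub import HfApi
#
#     fs = HfFileSystem(repo_info=HfApi().dataset_info("squad"))
#     print(fs.ls(""))                 # top-level files and directories
#     with fs.open("README.md") as f:  # streams the file over fsspec/http
#         print(f.read()[:100])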
| 230 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
A__ = ArgumentParser("Diffusers CLI tool" , usage="diffusers-cli <command> [<args>]" )
A__ = parser.add_subparsers(help="diffusers-cli command helpers" )
# Register commands
EnvironmentCommand.register_subcommand(lowercase_ )
# Let's go
A__ = parser.parse_args()
if not hasattr(lowercase_ , "func" ):
parser.print_help()
exit(1 )
# Run
A__ = args.func(lowercase_ )
service.run()
if __name__ == "__main__":
main()
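# Typical invocation once installed as a console script (a sketch):
#
#     $ diffusers-cli env
#
# argparse dispatches to the command class registered above: each
# `register_subcommand` sets a `func` default on its subparser, so
# `args.func(args)` builds the command object and `service.run()` executes it.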
| 230 | 1 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
SCREAMING_SNAKE_CASE : List[str] = getLogger(__name__)
def lowercase ( _snake_case : Tuple , _snake_case : str , _snake_case : str , _snake_case : int = 8 , _snake_case : int = 1_024 , _snake_case : Any="val" , _snake_case : Tuple=None , _snake_case : Any=False , _snake_case : str="summarization" , _snake_case : Dict=None , _snake_case : Optional[Any]=1 , _snake_case : Dict = None , _snake_case : List[Any]="" , **_snake_case : int , ) ->Dict:
"""simple docstring"""
__snake_case : int = str(_snake_case )
assert local_rank is not None
torch.distributed.init_process_group(backend='''nccl''' , rank=_snake_case )
__snake_case : Optional[Any] = Path(_snake_case )
__snake_case : str = save_dir.joinpath(f"""rank_{local_rank}_output.json""" )
torch.cuda.set_device(_snake_case )
__snake_case : Tuple = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ).cuda()
if fpaa:
__snake_case : List[str] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(_snake_case , _snake_case ) # update config with task specific params
__snake_case : Dict = generate_kwargs.pop('''num_beams''' , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
__snake_case : Optional[Any] = num_return_sequences
__snake_case : Dict = AutoTokenizer.from_pretrained(_snake_case )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
if max_source_length is None:
__snake_case : List[str] = tokenizer.model_max_length
if prefix is None:
__snake_case : List[str] = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
__snake_case : List[str] = SeqaSeqDataset(
_snake_case , _snake_case , _snake_case , max_target_length=1_024 , type_path=_snake_case , n_obs=_snake_case , prefix=_snake_case , **_snake_case , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
__snake_case : Union[str, Any] = ds.make_sortish_sampler(_snake_case , distributed=_snake_case , add_extra_examples=_snake_case , shuffle=_snake_case )
__snake_case : List[Any] = DataLoader(_snake_case , sampler=_snake_case , batch_size=_snake_case , collate_fn=ds.collate_fn )
__snake_case : Union[str, Any] = []
for batch in tqdm(_snake_case ):
__snake_case : Tuple = model.generate(
input_ids=batch['''input_ids'''].to(model.device ) , attention_mask=batch['''attention_mask'''].to(model.device ) , num_return_sequences=_snake_case , num_beams=_snake_case , **_snake_case , )
__snake_case : List[Any] = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case )
__snake_case : List[str] = batch['''ids''']
if num_return_sequences > 1:
__snake_case : Dict = chunks(_snake_case , _snake_case ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(_snake_case ):
results.append({'''pred''': pred, '''id''': ids[i].item()} )
save_json(_snake_case , _snake_case )
return results, sampler.num_replicas
def lowercase ( ) ->int:
"""simple docstring"""
__snake_case : Any = argparse.ArgumentParser(
epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''' )
parser.add_argument('''--data_dir''' , type=_snake_case , help='''like cnn_dm/test.source''' )
parser.add_argument(
'''--model_name''' , type=_snake_case , help='''like facebook/bart-large-cnn,t5-base, etc.''' , default='''sshleifer/distilbart-xsum-12-3''' , )
parser.add_argument('''--save_dir''' , type=_snake_case , help='''where to save''' , default='''tmp_gen''' )
parser.add_argument('''--max_source_length''' , type=_snake_case , default=_snake_case )
parser.add_argument(
'''--type_path''' , type=_snake_case , default='''test''' , help='''which subset to evaluate typically train/val/test''' )
parser.add_argument('''--task''' , type=_snake_case , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=_snake_case , default=8 , required=_snake_case , help='''batch size''' )
parser.add_argument(
'''--local_rank''' , type=_snake_case , default=-1 , required=_snake_case , help='''should be passed by distributed.launch''' )
parser.add_argument(
'''--n_obs''' , type=_snake_case , default=_snake_case , required=_snake_case , help='''How many observations. Defaults to all.''' )
parser.add_argument(
'''--num_return_sequences''' , type=_snake_case , default=1 , required=_snake_case , help='''How many sequences to return''' )
parser.add_argument(
'''--sync_timeout''' , type=_snake_case , default=600 , required=_snake_case , help='''How long should master process wait for other processes to finish.''' , )
parser.add_argument('''--src_lang''' , type=_snake_case , default=_snake_case , required=_snake_case )
parser.add_argument('''--tgt_lang''' , type=_snake_case , default=_snake_case , required=_snake_case )
parser.add_argument(
'''--prefix''' , type=_snake_case , required=_snake_case , default=_snake_case , help='''will be added to the begininng of src examples''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--debug''' , action='''store_true''' )
__snake_case : str = time.time()
__snake_case , __snake_case : Any = parser.parse_known_args()
__snake_case : List[Any] = parse_numeric_n_bool_cl_kwargs(_snake_case )
if generate_kwargs and args.local_rank <= 0:
print(f"""parsed the following generate kwargs: {generate_kwargs}""" )
__snake_case : List[Any] = Path(args.save_dir + '''_tmp''' )
Path(_snake_case ).mkdir(exist_ok=_snake_case ) # this handles locking.
__snake_case : Optional[int] = list(json_save_dir.glob('''rank_*.json''' ) )
if intermediate_files:
raise ValueError(f"""Found files at {json_save_dir} please move or remove them.""" )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
__snake_case : Dict = {}
if args.src_lang is not None:
__snake_case : Dict = args.src_lang
if args.tgt_lang is not None:
__snake_case : Dict = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=_snake_case )
__snake_case , __snake_case : List[Any] = eval_data_dir(
args.data_dir , _snake_case , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=_snake_case , **_snake_case , )
if args.local_rank <= 0:
__snake_case : int = Path(args.save_dir )
save_dir.mkdir(exist_ok=_snake_case )
__snake_case : Optional[Any] = gather_results_from_each_node(_snake_case , _snake_case , args.sync_timeout )
__snake_case : str = combine_partial_results(_snake_case )
if args.num_return_sequences > 1:
__snake_case : List[Any] = save_dir.joinpath('''pseudolabel_results.json''' )
print(f"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""" )
save_json(_snake_case , _snake_case )
return
__snake_case : Tuple = Path(args.data_dir ).joinpath(args.type_path + '''.target''' )
with open(_snake_case ) as f:
__snake_case : Optional[Any] = [x.rstrip() for x in f.readlines()][: len(_snake_case )]
# Calculate metrics, save metrics, and save _generations.txt
__snake_case : List[str] = '''translation''' in args.task
__snake_case : List[Any] = calculate_bleu if calc_bleu else calculate_rouge
__snake_case : Dict = '''bleu''' if calc_bleu else '''rouge'''
__snake_case : Dict = score_fn(_snake_case , _snake_case )
__snake_case : int = len(_snake_case )
__snake_case : Dict = time.time() - start_time
__snake_case : Optional[Any] = round(runtime / metrics['''n_obs'''] , 4 )
__snake_case : List[Any] = num_replicas
# TODO(@stas00): add whatever metadata to metrics
__snake_case : int = save_dir.joinpath(f"""{args.type_path}_{metric_name}.json""" )
save_json(_snake_case , _snake_case , indent=_snake_case )
print(_snake_case )
write_txt_file(_snake_case , save_dir.joinpath(f"""{args.type_path}_generations.txt""" ) )
if args.debug:
write_txt_file(_snake_case , save_dir.joinpath(f"""{args.type_path}.target""" ) )
else:
shutil.rmtree(_snake_case )
def lowercase ( _snake_case : Union[str, Any] ) ->List:
"""simple docstring"""
__snake_case : List[Any] = []
for partial_result in partial_results:
records.extend(_snake_case )
__snake_case : List[str] = sorted(_snake_case , key=lambda _snake_case : x["id"] )
__snake_case : Tuple = [x['''pred'''] for x in records]
return preds
def lowercase ( _snake_case : int , _snake_case : List[str] , _snake_case : List[Any] ) ->List[Dict[str, List]]:
"""simple docstring"""
__snake_case : List[str] = time.time()
logger.info('''waiting for all nodes to finish''' )
__snake_case : List[str] = None
while (time.time() - start_wait) < timeout:
__snake_case : Any = list(save_dir.glob('''rank_*.json''' ) )
if len(_snake_case ) < num_replicas:
continue
try:
# make sure all json files are fully saved
__snake_case : Tuple = lmap(_snake_case , _snake_case )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError('''Rank 0 gave up on waiting for other processes''' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
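# Example launch (a sketch; flag names follow the argparse definitions above,
# while the paths and model are placeholders -- upstream the anonymized defs
# are `eval_data_dir`, `run_generate`, `combine_partial_results` and
# `gather_results_from_each_node`):
#
#     python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#         --model_name sshleifer/distilbart-xsum-12-3 --data_dir xsum \
#         --save_dir tmp_gen --bs 16 --fp16
#
# Each rank writes rank_<i>_output.json; rank 0 waits for all ranks, merges
# the partial predictions, scores them (ROUGE or BLEU) and saves the metrics.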
| 102 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
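# Minimal usage sketch for the configuration above:
#
#     config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
#     assert config.model_type == "xmod"
#     assert config.adapter_reduction_factor == 2  # per-language adapter bottleneck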
| 211 | 0 |
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Union[str, Any] ,lowercase_ : Dict ,lowercase_ : Any=1_3 ,lowercase_ : Optional[int]=3_0 ,lowercase_ : Optional[int]=2 ,lowercase_ : Any=3 ,lowercase_ : List[str]=True ,lowercase_ : List[Any]=True ,lowercase_ : Tuple=3_2 ,lowercase_ : Any=5 ,lowercase_ : int=4 ,lowercase_ : Union[str, Any]=3_7 ,lowercase_ : Tuple="gelu" ,lowercase_ : List[str]=0.1 ,lowercase_ : Dict=0.1 ,lowercase_ : str=1_0 ,lowercase_ : Tuple=0.02 ,lowercase_ : Dict=3 ,lowercase_ : Any=None ,lowercase_ : Optional[Any]=2 ,):
lowerCAmelCase__ : int = parent
lowerCAmelCase__ : str = batch_size
lowerCAmelCase__ : Any = image_size
lowerCAmelCase__ : List[Any] = patch_size
lowerCAmelCase__ : List[str] = num_channels
lowerCAmelCase__ : int = is_training
lowerCAmelCase__ : Dict = use_labels
lowerCAmelCase__ : Dict = hidden_size
lowerCAmelCase__ : Tuple = num_hidden_layers
lowerCAmelCase__ : List[Any] = num_attention_heads
lowerCAmelCase__ : Dict = intermediate_size
lowerCAmelCase__ : int = hidden_act
lowerCAmelCase__ : Tuple = hidden_dropout_prob
lowerCAmelCase__ : Tuple = attention_probs_dropout_prob
lowerCAmelCase__ : Any = type_sequence_label_size
lowerCAmelCase__ : Union[str, Any] = initializer_range
lowerCAmelCase__ : Optional[int] = scope
lowerCAmelCase__ : Optional[int] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
lowerCAmelCase__ : Dict = (image_size // patch_size) ** 2
lowerCAmelCase__ : Tuple = num_patches + 2
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : Optional[Any] = None
if self.use_labels:
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCAmelCase__ : List[Any] = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self : Optional[Any] ):
return DeiTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowercase_ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def __lowerCAmelCase ( self : int ,lowercase_ : Any ,lowercase_ : int ,lowercase_ : Any ):
lowerCAmelCase__ : Optional[int] = DeiTModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : Tuple = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : Tuple ,lowercase_ : Tuple ,lowercase_ : Union[str, Any] ,lowercase_ : str ):
lowerCAmelCase__ : List[Any] = DeiTForMaskedImageModeling(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(lowercase_ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : Dict = DeiTForMaskedImageModeling(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ : int = model(lowercase_ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : Optional[Any] ,lowercase_ : str ,lowercase_ : Union[str, Any] ):
lowerCAmelCase__ : List[str] = self.type_sequence_label_size
lowerCAmelCase__ : Any = DeiTForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : Optional[int] = model(lowercase_ ,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase__ : List[Any] = 1
lowerCAmelCase__ : Optional[Any] = DeiTForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ : int = model(lowercase_ ,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,
) : Any = config_and_inputs
lowerCAmelCase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
lowercase__ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowercase__ = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCAmelCase ( self : Optional[Any] ):
lowerCAmelCase__ : Optional[Any] = DeiTModelTester(self )
lowerCAmelCase__ : str = ConfigTester(self ,config_class=lowercase_ ,has_text_modality=lowercase_ ,hidden_size=3_7 )
def __lowerCAmelCase ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def __lowerCAmelCase ( self : Union[str, Any] ):
pass
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ ,lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : List[Any] = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowerCAmelCase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ ,nn.Linear ) )
def __lowerCAmelCase ( self : str ):
lowerCAmelCase__ ,lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Any = model_class(lowercase_ )
lowerCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Optional[Any] = [*signature.parameters.keys()]
lowerCAmelCase__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,lowercase_ )
def __lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def __lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_ )
def __lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
def __lowerCAmelCase ( self : Tuple ,lowercase_ : Optional[Any] ,lowercase_ : Optional[int] ,lowercase_ : Dict=False ):
lowerCAmelCase__ : Optional[int] = super()._prepare_for_class(lowercase_ ,lowercase_ ,return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __lowerCAmelCase ( self : Any ):
if not self.model_tester.is_training:
return
lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Dict = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowercase_ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowerCAmelCase__ : Optional[Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
lowerCAmelCase__ : Dict = self._prepare_for_class(lowercase_ ,lowercase_ ,return_labels=lowercase_ )
lowerCAmelCase__ : Optional[int] = model(**lowercase_ ).loss
loss.backward()
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowerCAmelCase__ : List[Any] = False
lowerCAmelCase__ : List[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(lowercase_ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowerCAmelCase__ : List[Any] = model_class(lowercase_ )
model.gradient_checkpointing_enable()
model.to(lowercase_ )
model.train()
lowerCAmelCase__ : Optional[int] = self._prepare_for_class(lowercase_ ,lowercase_ ,return_labels=lowercase_ )
lowerCAmelCase__ : Optional[int] = model(**lowercase_ ).loss
loss.backward()
def __lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ ,lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : int = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowercase_ ),
*get_values(lowercase_ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
lowerCAmelCase__ : str = problem_type['''title''']
lowerCAmelCase__ : Tuple = problem_type['''num_labels''']
lowerCAmelCase__ : Any = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
lowerCAmelCase__ : Optional[Any] = self._prepare_for_class(lowercase_ ,lowercase_ ,return_labels=lowercase_ )
if problem_type["num_labels"] > 1:
lowerCAmelCase__ : int = inputs['''labels'''].unsqueeze(1 ).repeat(1 ,problem_type['''num_labels'''] )
lowerCAmelCase__ : Optional[int] = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowercase_ ) as warning_list:
lowerCAmelCase__ : Dict = model(**lowercase_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def __lowerCAmelCase ( self : List[str] ):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Union[str, Any] = DeiTModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def __SCREAMING_SNAKE_CASE ( ):
lowerCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self : Any ):
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Union[str, Any] = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to(
lowercase_ )
lowerCAmelCase__ : Optional[Any] = self.default_image_processor
lowerCAmelCase__ : Dict = prepare_img()
lowerCAmelCase__ : Tuple = image_processor(images=lowercase_ ,return_tensors='''pt''' ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Tuple = model(**lowercase_ )
# verify the logits
lowerCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape ,lowercase_ )
lowerCAmelCase__ : Dict = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowercase_ ,atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : Optional[int] = DeiTModel.from_pretrained(
'''facebook/deit-base-distilled-patch16-224''' ,torch_dtype=torch.floataa ,device_map='''auto''' )
lowerCAmelCase__ : Optional[Any] = self.default_image_processor
lowerCAmelCase__ : List[Any] = prepare_img()
lowerCAmelCase__ : int = image_processor(images=lowercase_ ,return_tensors='''pt''' )
lowerCAmelCase__ : Tuple = inputs.pixel_values.to(lowercase_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowerCAmelCase__ : Tuple = model(lowercase_ )
| 74 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
webbrowser.open(link)
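# Example invocation (network access and the packages above assumed):
#
#     $ python crawl_google_results.py "python tutorials"
#
# The try/except covers Google's two result markups: full pages wrap hits in
# a "yuRUbf" container, while the lightweight markup ("kCrYT") links to a
# /url?q=... redirect, hence the parse_qs fallback on AttributeError.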
| 74 | 1 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __get__( self ,__UpperCAmelCase ,__UpperCAmelCase=None ) -> Union[str, Any]:
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("""unreadable attribute""" )
lowerCAmelCase__ : Optional[Any] = """__cached_""" + self.fget.__name__
lowerCAmelCase__ : Union[str, Any] = getattr(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
if cached is None:
lowerCAmelCase__ : List[str] = self.fget(__UpperCAmelCase )
setattr(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
return cached
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f"""invalid truth value {val!r}""" )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
if is_torch_fx_proxy(UpperCamelCase ):
return True
if is_torch_available():
import torch
if isinstance(UpperCamelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(UpperCamelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(UpperCamelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(UpperCamelCase , np.ndarray )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
return isinstance(UpperCamelCase , np.ndarray )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
return _is_numpy(UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
import torch
return isinstance(UpperCamelCase , torch.Tensor )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch(UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
import torch
return isinstance(UpperCamelCase , torch.device )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_device(UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
import torch
if isinstance(UpperCamelCase , UpperCamelCase ):
if hasattr(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : Any = getattr(UpperCamelCase , UpperCamelCase )
else:
return False
return isinstance(UpperCamelCase , torch.dtype )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_dtype(UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
import tensorflow as tf
return isinstance(UpperCamelCase , tf.Tensor )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
return False if not is_tf_available() else _is_tensorflow(UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(UpperCamelCase , """is_symbolic_tensor""" ):
return tf.is_symbolic_tensor(UpperCamelCase )
return type(UpperCamelCase ) == tf.Tensor
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
import jax.numpy as jnp # noqa: F811
return isinstance(UpperCamelCase , jnp.ndarray )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
return False if not is_flax_available() else _is_jax(UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
if isinstance(UpperCamelCase , (dict, UserDict) ):
return {k: to_py_obj(UpperCamelCase ) for k, v in obj.items()}
elif isinstance(UpperCamelCase , (list, tuple) ):
return [to_py_obj(UpperCamelCase ) for o in obj]
elif is_tf_tensor(UpperCamelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(UpperCamelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(UpperCamelCase ):
return np.asarray(UpperCamelCase ).tolist()
elif isinstance(UpperCamelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
if isinstance(UpperCamelCase , (dict, UserDict) ):
return {k: to_numpy(UpperCamelCase ) for k, v in obj.items()}
elif isinstance(UpperCamelCase , (list, tuple) ):
return np.array(UpperCamelCase )
elif is_tf_tensor(UpperCamelCase ):
return obj.numpy()
elif is_torch_tensor(UpperCamelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(UpperCamelCase ):
return np.asarray(UpperCamelCase )
else:
return obj
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
lowerCAmelCase__ : List[Any] = fields(self )
# Safety and consistency checks
if not len(__UpperCAmelCase ):
raise ValueError(F"""{self.__class__.__name__} has no fields.""" )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F"""{self.__class__.__name__} should not have more than one required field.""" )
lowerCAmelCase__ : Optional[int] = getattr(self ,class_fields[0].name )
lowerCAmelCase__ : Optional[int] = all(getattr(self ,field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(__UpperCAmelCase ):
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : List[str] = first_field.items()
lowerCAmelCase__ : Union[str, Any] = True
else:
try:
lowerCAmelCase__ : Union[str, Any] = iter(__UpperCAmelCase )
lowerCAmelCase__ : Tuple = True
except TypeError:
lowerCAmelCase__ : int = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(__UpperCAmelCase ):
if (
not isinstance(__UpperCAmelCase ,(list, tuple) )
or not len(__UpperCAmelCase ) == 2
or not isinstance(element[0] ,__UpperCAmelCase )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
lowerCAmelCase__ : Any = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
break
setattr(self ,element[0] ,element[1] )
if element[1] is not None:
lowerCAmelCase__ : List[str] = element[1]
elif first_field is not None:
lowerCAmelCase__ : List[str] = first_field
else:
for field in class_fields:
lowerCAmelCase__ : str = getattr(self ,field.name )
if v is not None:
lowerCAmelCase__ : Optional[int] = v
def __delitem__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
raise Exception(F"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
def UpperCAmelCase_ ( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
raise Exception(F"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
def UpperCAmelCase_ ( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict:
raise Exception(F"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
def UpperCAmelCase_ ( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
raise Exception(F"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
def __getitem__( self ,__UpperCAmelCase ) -> Dict:
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : List[str] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[Any]:
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(__UpperCAmelCase ,__UpperCAmelCase )
super().__setattr__(__UpperCAmelCase ,__UpperCAmelCase )
def __setitem__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple:
# Will raise a KeyException if needed
super().__setitem__(__UpperCAmelCase ,__UpperCAmelCase )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(__UpperCAmelCase ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Tuple[Any]:
return tuple(self[k] for k in self.keys() )
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}" )
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = '''longest'''
__lowercase : int = '''max_length'''
__lowercase : Optional[int] = '''do_not_pad'''
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[int] = '''pt'''
__lowercase : Any = '''tf'''
__lowercase : Any = '''np'''
__lowercase : Any = '''jax'''
class lowerCAmelCase_:
'''simple docstring'''
def __init__( self ,__UpperCAmelCase ) -> Tuple:
lowerCAmelCase__ : Union[str, Any] = context_managers
lowerCAmelCase__ : int = ExitStack()
def __enter__( self ) -> Optional[int]:
for context_manager in self.context_managers:
self.stack.enter_context(__UpperCAmelCase )
def __exit__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
self.stack.__exit__(*__UpperCAmelCase ,**__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = infer_framework(UpperCamelCase )
if framework == "tf":
lowerCAmelCase__ : str = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCAmelCase__ : str = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCAmelCase__ : Optional[Any] = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = model_class.__name__
lowerCAmelCase__ : Tuple = infer_framework(UpperCamelCase )
if framework == "tf":
lowerCAmelCase__ : Any = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCAmelCase__ : Optional[int] = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCAmelCase__ : Union[str, Any] = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase = "" , UpperCamelCase = "." ):
"""simple docstring"""
def _flatten_dict(UpperCamelCase , UpperCamelCase="" , UpperCamelCase="." ):
for k, v in d.items():
lowerCAmelCase__ : str = str(UpperCamelCase ) + delimiter + str(UpperCamelCase ) if parent_key else k
if v and isinstance(UpperCamelCase , UpperCamelCase ):
yield from flatten_dict(UpperCamelCase , UpperCamelCase , delimiter=UpperCamelCase ).items()
else:
yield key, v
return dict(_flatten_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) )
@contextmanager
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase = False ):
"""simple docstring"""
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase=None ):
"""simple docstring"""
if is_numpy_array(UpperCamelCase ):
return np.transpose(UpperCamelCase , axes=UpperCamelCase )
elif is_torch_tensor(UpperCamelCase ):
return array.T if axes is None else array.permute(*UpperCamelCase )
elif is_tf_tensor(UpperCamelCase ):
import tensorflow as tf
return tf.transpose(UpperCamelCase , perm=UpperCamelCase )
elif is_jax_tensor(UpperCamelCase ):
return jnp.transpose(UpperCamelCase , axes=UpperCamelCase )
else:
raise ValueError(f"""Type not supported for transpose: {type(UpperCamelCase )}.""" )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
if is_numpy_array(UpperCamelCase ):
return np.reshape(UpperCamelCase , UpperCamelCase )
elif is_torch_tensor(UpperCamelCase ):
return array.reshape(*UpperCamelCase )
elif is_tf_tensor(UpperCamelCase ):
import tensorflow as tf
return tf.reshape(UpperCamelCase , UpperCamelCase )
elif is_jax_tensor(UpperCamelCase ):
return jnp.reshape(UpperCamelCase , UpperCamelCase )
else:
raise ValueError(f"""Type not supported for reshape: {type(UpperCamelCase )}.""" )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase=None ):
"""simple docstring"""
if is_numpy_array(UpperCamelCase ):
return np.squeeze(UpperCamelCase , axis=UpperCamelCase )
elif is_torch_tensor(UpperCamelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=UpperCamelCase )
elif is_tf_tensor(UpperCamelCase ):
import tensorflow as tf
return tf.squeeze(UpperCamelCase , axis=UpperCamelCase )
elif is_jax_tensor(UpperCamelCase ):
return jnp.squeeze(UpperCamelCase , axis=UpperCamelCase )
else:
raise ValueError(f"""Type not supported for squeeze: {type(UpperCamelCase )}.""" )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
if is_numpy_array(UpperCamelCase ):
return np.expand_dims(UpperCamelCase , UpperCamelCase )
elif is_torch_tensor(UpperCamelCase ):
return array.unsqueeze(dim=UpperCamelCase )
elif is_tf_tensor(UpperCamelCase ):
import tensorflow as tf
return tf.expand_dims(UpperCamelCase , axis=UpperCamelCase )
elif is_jax_tensor(UpperCamelCase ):
return jnp.expand_dims(UpperCamelCase , axis=UpperCamelCase )
else:
raise ValueError(f"""Type not supported for expand_dims: {type(UpperCamelCase )}.""" )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
if is_numpy_array(UpperCamelCase ):
return np.size(UpperCamelCase )
elif is_torch_tensor(UpperCamelCase ):
return array.numel()
elif is_tf_tensor(UpperCamelCase ):
import tensorflow as tf
return tf.size(UpperCamelCase )
elif is_jax_tensor(UpperCamelCase ):
return array.size
else:
raise ValueError(f"""Type not supported for expand_dims: {type(UpperCamelCase )}.""" )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
for key, value in auto_map.items():
if isinstance(UpperCamelCase , (tuple, list) ):
lowerCAmelCase__ : Union[str, Any] = [f"""{repo_id}--{v}""" if (v is not None and """--""" not in v) else v for v in value]
elif value is not None and "--" not in value:
lowerCAmelCase__ : List[str] = f"""{repo_id}--{value}"""
return auto_map
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
for base_class in inspect.getmro(UpperCamelCase ):
lowerCAmelCase__ : Optional[Any] = base_class.__module__
lowerCAmelCase__ : Dict = base_class.__name__
if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("""torch""" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f"""Could not infer framework from class {model_class}.""" )
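# The anonymized defs above correspond, in upstream
# `transformers.utils.generic`, to `transpose`, `reshape`, `squeeze`,
# `expand_dims` and `tensor_size`; a NumPy-only sketch of the dispatch
# (upstream names assumed):
#
#     import numpy as np
#
#     x = np.zeros((2, 3, 4))
#     transpose(x).shape        # (4, 3, 2): axes reversed when axes is None
#     reshape(x, (6, 4)).shape  # (6, 4)
#     expand_dims(x, 0).shape   # (1, 2, 3, 4)
#     tensor_size(x)            # 24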
| 37 |
'''simple docstring'''
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """A sentence is a pangram if it uses every letter of the alphabet."""
    # Declaring an empty set to collect the distinct letters seen so far
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Faster version using a fixed-size flag list indexed by letter."""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Fastest version: a single set comprehension over the lowercased string."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark code comparing the different versions."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
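
# Why the set-comprehension variant wins (a note, not original code): it makes
# a single pass with no per-character Python-level branching, e.g.:
#
#     >>> is_pangram_fastest("Pack my box with five dozen liquor jugs")
#     True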
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 37 | 1 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: maximum value for capacity `w` over `n` items."""
    # Sort items by value-to-weight ratio, best first (the greedy choice).
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))  # prefix sums of the sorted weights
    k = bisect(acc, w)  # number of items that fit entirely
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
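
# Worked example (classic instance): values (60, 100, 120), weights
# (10, 20, 30), capacity 50 -> items sorted by ratio are taken whole until
# only 20 units of capacity remain, then 20/30 of the last item is added,
# giving 160 + 80:
#
#     >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
#     240.0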
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class ShapEPipelineOutput ( BaseOutput ):
    '''simple docstring'''
    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]
class ShapEImgaImgPipeline ( DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self , prior , image_encoder , image_processor , scheduler , renderer , ):
        """simple docstring"""
        super().__init__()
        self.register_modules(
            prior=prior , image_encoder=image_encoder , image_processor=image_processor , scheduler=scheduler , renderer=renderer , )
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        """simple docstring"""
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
            latents = latents.to(device )
        # scale the initial noise to the scheduler's expected variance
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        device = torch.device(F'''cuda:{gpu_id}''' )
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    @property
    def _execution_device( self ):
"""simple docstring"""
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(module , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    def _encode_image( self , image , device , num_images_per_prompt , do_classifier_free_guidance , ):
        """simple docstring"""
        if isinstance(image , list ) and isinstance(image[0] , torch.Tensor ):
            image = torch.cat(image , axis=0 ) if image[0].ndim == 4 else torch.stack(image , axis=0 )
        if not isinstance(image , torch.Tensor ):
            image = self.image_processor(image , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
        image = image.to(dtype=self.image_encoder.dtype , device=device )
        image_embeds = self.image_encoder(image )['last_hidden_state']
        image_embeds = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds] )
        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image , num_images_per_prompt = 1 , num_inference_steps = 2_5 , generator = None , latents = None , guidance_scale = 4.0 , frame_size = 6_4 , output_type = "pil" , return_dict = True , ):
        """simple docstring"""
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        elif isinstance(image , list ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
            batch_size = len(image )
        else:
            raise ValueError(
                F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image )}''' )
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image , device , num_images_per_prompt , do_classifier_free_guidance )
        # prior
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0] , num_embeddings , embedding_dim )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            noise_pred = self.prior(
                scaled_model_input , timestep=t , proj_embedding=image_embeds , ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred , timestep=t , sample=latents , ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents )
        images = []
        for i, latent in enumerate(latents ):
            image = self.renderer.decode(
                latent[None, :] , device , size=frame_size , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
            images.append(image )
        images = torch.stack(images )
        if output_type not in ["np", "pil"]:
            raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' )
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image ) for image in images]
        # Offload last model to CPU
        if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images )
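# Note: with guidance_scale <= 1.0 classifier-free guidance is skipped entirely,
# so only a single (conditional) forward pass through the prior runs per step;
# above 1.0 the batch is doubled and the two predictions are recombined.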
| 192 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester ( ConfigTester ):
    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config , '''tf_padding'''))
        self.parent.assertTrue(hasattr(config , '''depth_multiplier'''))
class MobileNetVaModelTester :
    def __init__( self , parent , batch_size=1_3 , num_channels=3 , image_size=3_2 , depth_multiplier=0.25 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=3_2 , first_layer_is_expansion=True , finegrained_output=True , tf_padding=True , hidden_act="relu6" , last_hidden_size=1_2_8_0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=1_0 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
def UpperCAmelCase__ ( self : List[str]):
lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowerCAmelCase_ : Any = None
lowerCAmelCase_ : List[Any] = None
if self.use_labels:
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , self.num_labels)
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
lowerCAmelCase_ : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase__ ( self : Dict):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Any , A_ : Any , A_ : List[Any] , A_ : List[str] , A_ : Tuple):
lowerCAmelCase_ : int = MobileNetVaModel(config=A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : str = model(A_)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def UpperCAmelCase__ ( self : Optional[Any] , A_ : Tuple , A_ : List[Any] , A_ : Optional[int] , A_ : Optional[Any]):
lowerCAmelCase_ : Any = self.num_labels
lowerCAmelCase_ : List[str] = MobileNetVaForImageClassification(A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : Optional[int] = model(A_ , labels=A_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self : Optional[Any] , A_ : Union[str, Any] , A_ : int , A_ : Tuple , A_ : Optional[int]):
lowerCAmelCase_ : Any = self.num_labels
lowerCAmelCase_ : Optional[int] = MobileNetVaForSemanticSegmentation(A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : str = model(A_)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCAmelCase_ : Union[str, Any] = model(A_ , labels=A_)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ : Tuple = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = config_and_inputs
lowerCAmelCase_ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class MobileNetVaModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_a = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False)
def UpperCAmelCase__ ( self : Any):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''')
def UpperCAmelCase__ ( self : List[Any]):
pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''')
def UpperCAmelCase__ ( self : int):
pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''')
def UpperCAmelCase__ ( self : List[str]):
pass
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Optional[Any] = model_class(A_)
lowerCAmelCase_ : Optional[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : Union[str, Any] = [*signature.parameters.keys()]
lowerCAmelCase_ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A_)
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_)
def UpperCAmelCase__ ( self : int):
def check_hidden_states_output(A_ : List[str] , A_ : int , A_ : Any):
lowerCAmelCase_ : Tuple = model_class(A_)
model.to(A_)
model.eval()
with torch.no_grad():
lowerCAmelCase_ : Any = model(**self._prepare_for_class(A_ , A_))
lowerCAmelCase_ : str = outputs.hidden_states
lowerCAmelCase_ : List[Any] = 1_6
self.assertEqual(len(A_) , A_)
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Optional[int] = True
check_hidden_states_output(A_ , A_ , A_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ : int = True
check_hidden_states_output(A_ , A_ , A_)
def UpperCAmelCase__ ( self : str):
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_)
def UpperCAmelCase__ ( self : Union[str, Any]):
lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A_)
@slow
def UpperCAmelCase__ ( self : List[str]):
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : List[Any] = MobileNetVaModel.from_pretrained(A_)
self.assertIsNotNone(A_)
def prepare_img( ):
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
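# prepare_img() loads the standard COCO cats fixture shared across the vision
# tests; the relative path assumes the suite is run from the repository root.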
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest ( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''') if is_vision_available() else None
)
    @slow
    def test_inference_image_classification_head( self ):
        model = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_1))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
    @slow
    def test_inference_semantic_segmentation( self ):
        model = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''')
        model = model.to(torch_device)
        image_processor = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''')
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 2_1, 6_5, 6_5))
        self.assertEqual(logits.shape , expected_shape)
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-4))
| 103 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
A__ : Tuple = get_logger(__name__)
class MockDownloadManager :
    dummy_file_name = '''dummy_data'''
    datasets_scripts_dir = '''datasets'''
    is_local = False
    def __init__( self , dataset_name : str , config : str , version : Union[Version, str] , cache_dir : Optional[str] = None , use_local_dummy_data : bool = False , load_existing_dummy_data : bool = True , download_callbacks : Optional[List[Callable]] = None , ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file( self ):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file
    @property
    def dummy_data_folder( self ):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join('''dummy''' , self.config.name , self.version_name)
        # structure is dummy / version_name
        return os.path.join('''dummy''' , self.version_name)
    @property
    def dummy_zip_file( self ):
        return os.path.join(self.dummy_data_folder , '''dummy_data.zip''')
    def download_dummy_data( self ):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir , cache_dir=self.cache_dir , extract_compressed_file=True , force_extract=True)
        return os.path.join(local_path , self.dummy_file_name)
    @property
    def local_path_to_dummy_data( self ):
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file)
    @property
    def github_path_to_dummy_data( self ):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/'''))
        return self._bucket_url
    @property
    def manual_dir( self ):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , '''/''').split('''/''')[:-1])
    def download_and_extract( self , data_url , *args ):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url , dict):
            return self.create_dummy_data_dict(dummy_file , data_url)
        elif isinstance(data_url , (list, tuple)):
            return self.create_dummy_data_list(dummy_file , data_url)
        else:
            return self.create_dummy_data_single(dummy_file , data_url)
    def download( self , data_url , *args ):
        return self.download_and_extract(data_url)
    def download_custom( self , data_url , custom_download ):
        return self.download_and_extract(data_url)
    def extract( self , path , *args , **kwargs ):
        return path
    def get_recorded_sizes_checksums( self ):
        return {}
    def create_dummy_data_dict( self , path_to_dummy_data , data_url ):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls , list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls , list):
                value = [os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i , str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list( self , path_to_dummy_data , data_url ):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''') for url in data_url)
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(single_url.split('''/''')[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single( self , path_to_dummy_data , data_url ):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(data_url.split('''/''')[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files( self ):
        pass
    def manage_extracted_files( self ):
        pass
    def iter_archive( self , path ):
        def _iter_archive_members(path ):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)
        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob('''*''')
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''')):
                yield file_path.relative_to(path).as_posix(), file_path.open('''rb''')
    def iter_files( self , paths ):
        if not isinstance(paths , list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith(('''.''', '''__''')):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith(('''.''', '''__''')):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith(('''.''', '''__''')):
                            continue
                        yield os.path.join(dirpath , filename)
| 103 | 1 |
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature( word ) -> str:
    return "".join(sorted(word ) )
def anagram( word ) -> list[str]:
    return word_by_signature[signature(word )]
data = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
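# Illustrative lookup (actual output depends on the contents of words.txt):
# >>> signature("listen")
# 'eilnst'
# >>> anagram("listen")   # e.g. ['enlist', 'listen', 'silent', 'tinsel']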
if __name__ == "__main__":
lowerCamelCase : int ={word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams)) | 362 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class LanguageModeling ( TaskTemplate ):
    task: str = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''text''': Value('''string''' )} )
    label_schema: ClassVar[Features] = Features({} )
    text_column: str = "text"
    @property
    def column_mapping( self ) -> Dict[str, str]:
        '''simple docstring'''
        return {self.text_column: "text"}
| 196 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig (PretrainedConfig ):
    '''simple docstring'''
    model_type = """falcon"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    def __init__( self , vocab_size=6_5024 , hidden_size=4544 , num_hidden_layers=32 , num_attention_heads=71 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , hidden_dropout=0.0 , attention_dropout=0.0 , num_kv_heads=None , alibi=False , new_decoder_architecture=False , multi_query=True , parallel_attn=True , bias=False , bos_token_id=11 , eos_token_id=11 , **kwargs , ):
"""simple docstring"""
UpperCAmelCase_ : str = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase_ : Dict = kwargs.pop("n_embed" , lowercase_ )
UpperCAmelCase_ : Any = hidden_size if n_embed is None else n_embed
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : str = layer_norm_epsilon
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : List[str] = use_cache
UpperCAmelCase_ : List[Any] = hidden_dropout
UpperCAmelCase_ : List[Any] = attention_dropout
UpperCAmelCase_ : List[Any] = bos_token_id
UpperCAmelCase_ : Tuple = eos_token_id
UpperCAmelCase_ : List[str] = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCAmelCase_ : Any = alibi
UpperCAmelCase_ : Tuple = new_decoder_architecture
UpperCAmelCase_ : Union[str, Any] = multi_query # Ignored when new_decoder_architecture is True
UpperCAmelCase_ : List[str] = parallel_attn
UpperCAmelCase_ : int = bias
super().__init__(bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
    @property
    def head_dim( self ):
        """simple docstring"""
        return self.hidden_size // self.num_attention_heads
    @property
    def rotary( self ):
        """simple docstring"""
        return not self.alibi
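# Minimal sanity check of the derived properties (defaults mirror the 7B variant):
# >>> config = FalconConfig()
# >>> config.head_dim        # 4544 // 71
# 64
# >>> config.rotary          # rotary embeddings are used whenever alibi is off
# True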
| 61 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig ( SageMakerConfig ):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
"--model_name_or_path",
"bert",
"--do_train",
"False",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
    fail_training_script_args = [
"--model_name_or_path",
"bert",
"--do_train",
"--do_test",
"False",
"--do_predict",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
class PassedArgumentsTest ( unittest.TestCase ):
    def test_args_convert( self ):
        """simple docstring"""
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
        assert isinstance(converted_args["""model_name_or_path"""] , str )
        assert isinstance(converted_args["""do_train"""] , bool )
        assert isinstance(converted_args["""epochs"""] , int )
        assert isinstance(converted_args["""learning_rate"""] , float )
        assert isinstance(converted_args["""max_steps"""] , float )
        with pytest.raises(ValueError ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 219 | 0 |
"""simple docstring"""
import random
def partition( a , left_index , right_index ):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random( a , left , right ):
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        ) # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index ) # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right ) # recursive quicksort to the right of the pivot point
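# Illustrative in-place sort (the pivot is chosen at random, so recursion depth
# varies between runs but the result is always the same):
# >>> nums = [5, 1, 4, 2, 3]
# >>> quick_sort_random(nums, 0, len(nums))
# >>> nums
# [1, 2, 3, 4, 5]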
def main( ):
    user_input = input("Enter numbers separated by a comma:\n" ).strip()
    arr = [int(item ) for item in user_input.split("," )]
    quick_sort_random(arr , 0 , len(arr ) )
    print(arr )
if __name__ == "__main__":
main()
| 358 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution :
    """simple docstring"""
    def __init__( self ):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution( self ):
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
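# With the default signals [2, 1, 2, -1] and [1, 2, 3, 4], the circular
# convolution above evaluates to [10.0, 10.0, 6.0, 14.0].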
if __name__ == "__main__":
doctest.testmod()
| 127 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool( PipelineTool ):
    '''simple docstring'''
    default_checkpoint = """naver-clova-ix/donut-base-finetuned-docvqa"""
    description = (
        """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
        """should be the document containing the information, as well as a `question` that is the question about the """
        """document. It returns a text that contains the answer to the question."""
    )
    name = """document_qa"""
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["""image""", """text"""]
    outputs = ["""text"""]
    def __init__( self , *args , **kwargs ) -> Any:
        if not is_vision_available():
            raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
        super().__init__(*args , **kwargs )
    def encode( self , document , question ) -> Any:
        task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        prompt = task_prompt.replace('''{user_input}''' , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors='''pt''' ).input_ids
        pixel_values = self.pre_processor(document , return_tensors='''pt''' ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self , inputs ) -> int:
        return self.model.generate(
            inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences
    def decode( self , outputs ) -> List[Any]:
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
        sequence = re.sub(R'''<.*?>''' , '''''' , sequence , count=1 ).strip() # remove first task start token
        sequence = self.pre_processor.tokenajson(sequence )
        return sequence["answer"]
| 262 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 262 | 1 |
import math
def sieve( n ) -> list[int]:
    in_prime = []
    start = 2
    end = int(math.sqrt(n ) ) # Size of every segment
    temp = [True] * (end + 1)
    prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start )
            for i in range(start * start , end + 1 , start ):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end , n )
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(t , high + 1 , each ):
                temp[j - low] = False
        for j in range(len(temp ) ):
            if temp[j] is True:
                prime.append(j + low )
        low = high + 1
        high = min(high + end , n )
    return prime
print(sieve(10**6))
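# Smoke test (illustrative): the primes below 30 are
# >>> sieve(30)
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]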
| 223 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 223 | 1 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester :
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels
def __A ( self : Any ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __A ( self : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ConvNextVaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(UpperCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __A ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ConvNextVaForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ConvNextVaBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(UpperCamelCase__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : List[str] = ConvNextVaBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : str = {'''pixel_values''': pixel_values, '''labels''': labels}
return config, inputs_dict
@require_torch
class ConvNextVaModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=37 )
def __A ( self : List[Any] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def __A ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def __A ( self : int ):
'''simple docstring'''
pass
def __A ( self : str ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_with_labels()
SCREAMING_SNAKE_CASE : Union[str, Any] = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
continue
SCREAMING_SNAKE_CASE : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = model(**UpperCamelCase__ ).loss
loss.backward()
def __A ( self : Dict ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Any = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
continue
SCREAMING_SNAKE_CASE : List[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.gradient_checkpointing_enable()
model.train()
SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = model(**UpperCamelCase__ ).loss
loss.backward()
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __A ( self : List[Any] ):
'''simple docstring'''
def check_hidden_states_output(UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ):
SCREAMING_SNAKE_CASE : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE : Any = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase__ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def __A ( self : Tuple ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[int] = ConvNextVaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def prepare_img( ):
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest ( unittest.TestCase):
    @cached_property
    def default_image_processor( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 182 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 182 | 1 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            '''simple docstring'''
            pass
def hashimage( image ) -> str:
    """simple docstring"""
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable( mask ) -> Dict:
    """simple docstring"""
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests (unittest.TestCase ):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline( self , model , tokenizer , processor ):
        '''simple docstring'''
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test( self , mask_generator , examples ):
        '''simple docstring'''
        pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
pass
@slow
@require_torch
def UpperCamelCase ( self: int ):
'''simple docstring'''
        image_segmenter = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" )
        outputs = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 )
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["""masks"""] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
# fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.04_44},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0_21},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.01_67},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.01_32},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.00_53},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.99_67},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.9_93},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.99_09},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.98_79},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.98_34},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.97_16},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.96_12},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.95_99},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.95_52},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.95_32},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.95_16},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.94_99},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.94_83},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.94_64},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.9_43},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.9_43},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.94_08},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.93_35},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.93_26},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.92_62},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.89_99},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.89_86},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.89_84},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.88_73},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.88_71}
] , )
# fmt: on
@require_torch
@slow
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """facebook/sam-vit-huge"""
_SCREAMING_SNAKE_CASE = pipeline("""mask-generation""" , model=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
_SCREAMING_SNAKE_CASE = []
for i, o in enumerate(outputs["""masks"""] ):
new_output += [{"mask": mask_to_test_readable(UpperCAmelCase_ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.04_44},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.02_10},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.01_67},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.01_32},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.00_53},
] , )
| 355 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __UpperCAmelCase (PipelineTesterMixin ,unittest.TestCase ):
__snake_case : List[str] = KandinskyVaaInpaintPipeline
__snake_case : Union[str, Any] = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
__snake_case : Tuple = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
__snake_case : str = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__snake_case : List[str] = False
@property
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
return 32
@property
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
return 32
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
return 100
@property
def UpperCamelCase ( self: str ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_SCREAMING_SNAKE_CASE = UNetaDConditionModel(**UpperCAmelCase_ )
return model
@property
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.dummy_unet
_SCREAMING_SNAKE_CASE = self.dummy_movq
_SCREAMING_SNAKE_CASE = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[str]=0 ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCAmelCase_ )
# create init_image
_SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(UpperCAmelCase_ ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
_SCREAMING_SNAKE_CASE = np.ones((64, 64) , dtype=np.floataa )
_SCREAMING_SNAKE_CASE = 0
if str(UpperCAmelCase_ ).startswith("""mps""" ):
_SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """cpu"""
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = self.pipeline_class(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = output.images
_SCREAMING_SNAKE_CASE = pipe(
**self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0]
_SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE = np.array(
[0.50_77_59_03, 0.49_52_71_95, 0.48_82_45_43, 0.50_19_22_37, 0.48_64_49_06, 0.49_37_38_14, 0.4_78_05_98, 0.47_23_48_27, 0.48_32_78_48] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def UpperCamelCase ( self: int ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
_SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_SCREAMING_SNAKE_CASE = np.ones((768, 768) , dtype=np.floataa )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = """a hat"""
_SCREAMING_SNAKE_CASE = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = KandinskyVaaInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
_SCREAMING_SNAKE_CASE = pipeline.to(UpperCAmelCase_ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = pipe_prior(
UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_SCREAMING_SNAKE_CASE = pipeline(
image=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , image_embeds=UpperCAmelCase_ , negative_image_embeds=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
_SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
| 125 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline
__UpperCAmelCase : List[Any] = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
__UpperCAmelCase : Dict = [
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
__UpperCAmelCase : List[str] = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__UpperCAmelCase : Dict = False
@property
def __UpperCAmelCase ( self ):
return 32
@property
def __UpperCAmelCase ( self ):
return 32
@property
def __UpperCAmelCase ( self ):
return self.time_input_dim
@property
def __UpperCAmelCase ( self ):
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self ):
return 100
@property
def __UpperCAmelCase ( self ):
torch.manual_seed(0 )
__a = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__a = UNetaDConditionModel(**_a )
return model
@property
def __UpperCAmelCase ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCAmelCase ( self ):
torch.manual_seed(0 )
__a = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCAmelCase ( self ):
__a = self.dummy_unet
__a = self.dummy_movq
__a = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_a , )
__a = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __UpperCAmelCase ( self , _a , _a=0 ):
__a = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
__a = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create init_image
__a = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
__a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__a = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((256, 256) )
# create mask
__a = np.ones((64, 64) , dtype=np.floataa )
__a = 0
if str(_a ).startswith('''mps''' ):
__a = torch.manual_seed(_a )
else:
__a = torch.Generator(device=_a ).manual_seed(_a )
__a = {
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __UpperCAmelCase ( self ):
__a = '''cpu'''
__a = self.get_dummy_components()
__a = self.pipeline_class(**_a )
__a = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
__a = pipe(**self.get_dummy_inputs(_a ) )
__a = output.images
__a = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
__a = image[0, -3:, -3:, -1]
__a = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
__a = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def __UpperCAmelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
__a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
__a = np.ones((768, 768) , dtype=np.floataa )
__a = 0
__a = '''a hat'''
__a = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
__a = KandinskyVaaInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder-inpaint''' , torch_dtype=torch.floataa )
__a = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
__a = torch.Generator(device='''cpu''' ).manual_seed(0 )
__a , __a = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
__a = pipeline(
image=_a , mask_image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
| 45 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def a ( __a ) -> bool:
'''simple docstring'''
UpperCamelCase__ :int = int(number**0.5 )
return number == sq * sq
def a ( __a , __a , __a , __a , __a , __a ) -> tuple[int, int]:
'''simple docstring'''
UpperCamelCase__ :int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCamelCase__ :int = x_den * y_den * z_den
UpperCamelCase__ :int = gcd(__a , __a )
top //= hcf
bottom //= hcf
return top, bottom
def a ( __a = 35 ) -> int:
'''simple docstring'''
UpperCamelCase__ :set = set()
UpperCamelCase__ :int
UpperCamelCase__ :Fraction = Fraction(0 )
UpperCamelCase__ :tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCamelCase__ :int = x_num * y_den + x_den * y_num
UpperCamelCase__ :Any = x_den * y_den
UpperCamelCase__ :Tuple = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=2
UpperCamelCase__ :List[str] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCamelCase__ :Dict = x_den * x_den * y_den * y_den
if is_sq(__a ) and is_sq(__a ):
UpperCamelCase__ :Any = int(sqrt(__a ) )
UpperCamelCase__ :Optional[int] = int(sqrt(__a ) )
UpperCamelCase__ :int = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Tuple = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=-1
UpperCamelCase__ :Tuple = x_num * y_num
UpperCamelCase__ :Union[str, Any] = x_den * y_num + x_num * y_den
UpperCamelCase__ :List[str] = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :Union[str, Any] = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
# n=-2
UpperCamelCase__ :Optional[Any] = x_num * x_num * y_num * y_num
UpperCamelCase__ :Tuple = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__a ) and is_sq(__a ):
UpperCamelCase__ :str = int(sqrt(__a ) )
UpperCamelCase__ :Any = int(sqrt(__a ) )
UpperCamelCase__ :Dict = gcd(__a , __a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ :int = add_three(
__a , __a , __a , __a , __a , __a )
unique_s.add(__a )
for num, den in unique_s:
total += Fraction(__a , __a )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""") | 97 | 0 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] ={
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class UpperCAmelCase ( PretrainedConfig ):
__lowercase = """xlnet"""
__lowercase = ["""mems"""]
__lowercase = {
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self :int , lowercase_ :Any=3_20_00 , lowercase_ :Optional[int]=10_24 , lowercase_ :Optional[Any]=24 , lowercase_ :Optional[Any]=16 , lowercase_ :Union[str, Any]=40_96 , lowercase_ :Dict="gelu" , lowercase_ :int=True , lowercase_ :int="bi" , lowercase_ :Dict=0.0_2 , lowercase_ :str=1E-12 , lowercase_ :Optional[int]=0.1 , lowercase_ :Tuple=5_12 , lowercase_ :Tuple=None , lowercase_ :Any=True , lowercase_ :Tuple=False , lowercase_ :List[Any]=False , lowercase_ :int=-1 , lowercase_ :Tuple=False , lowercase_ :Optional[Any]="last" , lowercase_ :str=True , lowercase_ :Tuple="tanh" , lowercase_ :List[Any]=0.1 , lowercase_ :Optional[int]=5 , lowercase_ :Dict=5 , lowercase_ :int=5 , lowercase_ :Dict=1 , lowercase_ :List[Any]=2 , **lowercase_ :Optional[int] , )-> Dict:
A__ = vocab_size
A__ = d_model
A__ = n_layer
A__ = n_head
if d_model % n_head != 0:
raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
A__ = d_model // n_head
A__ = ff_activation
A__ = d_inner
A__ = untie_r
A__ = attn_type
A__ = initializer_range
A__ = layer_norm_eps
A__ = dropout
A__ = mem_len
A__ = reuse_len
A__ = bi_data
A__ = clamp_len
A__ = same_length
A__ = summary_type
A__ = summary_use_proj
A__ = summary_activation
A__ = summary_last_dropout
A__ = start_n_top
A__ = end_n_top
A__ = bos_token_id
A__ = pad_token_id
A__ = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead." , lowercase_ , )
A__ = kwargs["use_cache"]
A__ = use_mems_eval
A__ = use_mems_train
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
@property
def max_position_embeddings ( self :Any )-> int:
logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
def max_position_embeddings ( self :Tuple , lowercase_ :str )-> int:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 123 |
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def UpperCamelCase ( _lowerCamelCase : bool = True , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Optional[Any] ):
if not is_tqdm_available():
raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`." )
A__ = False
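# With main_process_only, the bar is disabled on every process except the local main one.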
if main_process_only:
A__ = PartialState().local_process_index != 0
return _tqdm(*_lowerCamelCase , **_lowerCamelCase , disable=_lowerCamelCase )
| 123 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Any = '''ZinengTang/tvlt-base'''
snake_case__ : Optional[Any] = tempfile.mkdtemp()
def __lowerCamelCase ( self :Dict ,**__lowercase :Tuple ):
return TvltImageProcessor.from_pretrained(self.checkpoint ,**__lowercase )
def __lowerCamelCase ( self :Tuple ,**__lowercase :int ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint ,**__lowercase )
def __lowerCamelCase ( self :Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def __lowerCamelCase ( self :Any ):
snake_case__ : List[Any] = self.get_image_processor()
snake_case__ : Optional[Any] = self.get_feature_extractor()
snake_case__ : List[str] = TvltProcessor(image_processor=__lowercase ,feature_extractor=__lowercase )
processor.save_pretrained(self.tmpdirname )
snake_case__ : str = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor ,__lowercase )
self.assertIsInstance(processor.image_processor ,__lowercase )
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : Optional[int] = self.get_image_processor()
snake_case__ : int = self.get_feature_extractor()
snake_case__ : Dict = TvltProcessor(image_processor=__lowercase ,feature_extractor=__lowercase )
snake_case__ : Dict = np.ones([1_2_0_0_0] )
snake_case__ : Optional[int] = feature_extractor(__lowercase ,return_tensors='''np''' )
snake_case__ : str = processor(audio=__lowercase ,return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def __lowerCamelCase ( self :str ):
snake_case__ : Optional[Any] = self.get_image_processor()
snake_case__ : str = self.get_feature_extractor()
snake_case__ : Optional[int] = TvltProcessor(image_processor=__lowercase ,feature_extractor=__lowercase )
snake_case__ : Any = np.ones([3, 2_2_4, 2_2_4] )
snake_case__ : Optional[Any] = image_processor(__lowercase ,return_tensors='''np''' )
snake_case__ : List[Any] = processor(images=__lowercase ,return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def __lowerCamelCase ( self :Any ):
snake_case__ : Tuple = self.get_image_processor()
snake_case__ : Any = self.get_feature_extractor()
snake_case__ : Dict = TvltProcessor(image_processor=__lowercase ,feature_extractor=__lowercase )
snake_case__ : Optional[Any] = np.ones([1_2_0_0_0] )
snake_case__ : Optional[int] = np.ones([3, 2_2_4, 2_2_4] )
snake_case__ : Any = processor(audio=__lowercase ,images=__lowercase )
self.assertListEqual(list(inputs.keys() ) ,['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(__lowercase ):
processor()
def __lowerCamelCase ( self :str ):
snake_case__ : List[str] = self.get_image_processor()
snake_case__ : int = self.get_feature_extractor()
snake_case__ : Optional[Any] = TvltProcessor(image_processor=__lowercase ,feature_extractor=__lowercase )
self.assertListEqual(
processor.model_input_names ,image_processor.model_input_names + feature_extractor.model_input_names ,msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' ,)
| 230 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class a ( PretrainedConfig ):
__lowerCAmelCase : str = """distilbert"""
__lowerCAmelCase : str = {
"""hidden_size""": """dim""",
"""num_attention_heads""": """n_heads""",
"""num_hidden_layers""": """n_layers""",
}
def __init__( self :Dict ,__lowercase :Tuple=3_0_5_2_2 ,__lowercase :Optional[Any]=5_1_2 ,__lowercase :List[str]=False ,__lowercase :List[str]=6 ,__lowercase :Optional[Any]=1_2 ,__lowercase :Tuple=7_6_8 ,__lowercase :int=4 * 7_6_8 ,__lowercase :List[Any]=0.1 ,__lowercase :List[str]=0.1 ,__lowercase :Union[str, Any]="gelu" ,__lowercase :List[str]=0.02 ,__lowercase :Optional[int]=0.1 ,__lowercase :Dict=0.2 ,__lowercase :Union[str, Any]=0 ,**__lowercase :Optional[Any] ,):
snake_case__ : List[str] = vocab_size
snake_case__ : Tuple = max_position_embeddings
snake_case__ : Optional[int] = sinusoidal_pos_embds
snake_case__ : str = n_layers
snake_case__ : List[Any] = n_heads
snake_case__ : Tuple = dim
snake_case__ : str = hidden_dim
snake_case__ : int = dropout
snake_case__ : Dict = attention_dropout
snake_case__ : Tuple = activation
snake_case__ : int = initializer_range
snake_case__ : Optional[Any] = qa_dropout
snake_case__ : Union[str, Any] = seq_classif_dropout
super().__init__(**__lowercase ,pad_token_id=__lowercase )
class a ( OnnxConfig ):
@property
def __lowerCamelCase ( self :Union[str, Any] ):
if self.task == "multiple-choice":
snake_case__ : str = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
snake_case__ : Optional[int] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 230 | 1 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
lowerCamelCase_ = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
lowerCamelCase_ = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
def lowerCamelCase ( a_ ) -> Any:
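# Rescale model outputs from [-1, 1] to [0, 1], move channels last, and hand off to numpy_to_pil.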
lowerCAmelCase_ = (images / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase_ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase_ = numpy_to_pil(lowerCAmelCase__ )
return images
def lowerCamelCase ( a_ ) -> str:
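# Convert a numpy batch (values in [0, 1]) to a list of PIL images, handling grayscale separately.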
if images.ndim == 3:
lowerCAmelCase_ = images[None, ...]
lowerCAmelCase_ = (images * 255).round().astype('uint8' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
lowerCAmelCase_ = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
else:
lowerCAmelCase_ = [Image.fromarray(lowerCAmelCase__ ) for image in images]
return pil_images
| 358 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class a_ ( BackboneConfigMixin , PretrainedConfig ):
'''simple docstring'''
__a: Optional[Any] = '''nat'''
__a: int = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , lowercase_=4 , lowercase_=3 , lowercase_=6_4 , lowercase_=[3, 4, 6, 5] , lowercase_=[2, 4, 8, 1_6] , lowercase_=7 , lowercase_=3.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=0.02 , lowercase_=1e-5 , lowercase_=0.0 , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowercase_ )
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = embed_dim
lowerCAmelCase_ = depths
lowerCAmelCase_ = len(lowercase_ )
lowerCAmelCase_ = num_heads
lowerCAmelCase_ = kernel_size
lowerCAmelCase_ = mlp_ratio
lowerCAmelCase_ = qkv_bias
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = drop_path_rate
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase_ = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
lowerCAmelCase_ = layer_scale_init_value
lowerCAmelCase_ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(lowercase_ ) + 1 )]
lowerCAmelCase_ , lowerCAmelCase_ = get_aligned_output_features_output_indices(
out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
| 14 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
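# Declare the import structure; tokenizer and model entries are registered below only when their optional dependencies are available.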
_lowercase = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 74 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _snake_case ( snake_case__ : Dict ):
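# Drop fairseq bookkeeping keys that have no counterpart in the Hugging Face state dict.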
A = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
'decoder.output_projection.weight',
]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def _snake_case ( snake_case__ : int ):
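# Build a linear output layer whose weights are tied to the token embedding matrix.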
A , A = emb.weight.shape
A = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ )
A = emb.weight.data
return lin_layer
def _snake_case ( snake_case__ : List[str] , snake_case__ : Any="facebook/mbart-large-en-ro" , snake_case__ : Optional[int]=False , snake_case__ : List[str]=False ):
A = torch.load(snake_case__ , map_location='cpu' )['model']
remove_ignore_keys_(snake_case__ )
A = state_dict['encoder.embed_tokens.weight'].shape[0]
A = MBartConfig.from_pretrained(snake_case__ , vocab_size=snake_case__ )
if mbart_aa and finetuned:
A = 'relu'
A = state_dict['decoder.embed_tokens.weight']
A = MBartForConditionalGeneration(snake_case__ )
model.model.load_state_dict(snake_case__ )
if finetuned:
A = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
_lowercase = parser.parse_args()
_lowercase = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 74 | 1 |
def snake_case_(_UpperCamelCase , _UpperCamelCase ) -> int:
"""simple docstring"""
return int((input_a, input_a).count(0 ) != 0 )
def snake_case_() -> None:
"""simple docstring"""
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 351 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__A = logging.get_logger(__name__)
class lowercase_ ( DeformableDetrImageProcessor ):
def __init__( self : Optional[Any] , *A__ : List[Any] , **A__ : int ) -> None:
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' , A__ , )
super().__init__(*A__ , **A__ )
| 278 | 0 |
"""simple docstring"""
def _snake_case ( _snake_case : int = 10_00 ) -> int:
'''simple docstring'''
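# Walk the Fibonacci sequence until a term reaches n digits and return its index.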
_A = 1, 1
_A = 2
while True:
_A = 0
_A = fa + fa
_A = fa, f
index += 1
for _ in str(lowercase_ ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 315 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
A_ : List[str] = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
A_ : Optional[int] = 'hopper-medium-v2'
A_ : List[Any] = gym.make(env_name)
A_ : str = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
A_ : List[Any] = env.reset()
A_ : Optional[int] = 0
A_ : str = 0
A_ : Optional[Any] = 1000
A_ : Union[str, Any] = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
A_ : Tuple = pipeline(obs, planning_horizon=32)
# execute action in environment
A_ , A_ , A_ , A_ : Dict = env.step(denorm_actions)
A_ : List[str] = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
f''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
A_ : int = next_observation
except KeyboardInterrupt:
pass
print(f'''Total reward: {total_reward}''')
| 192 | 0 |
def SCREAMING_SNAKE_CASE ( snake_case_ : list , snake_case_ : list ):
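# Manhattan (L1) distance: sum of absolute coordinate differences between two points.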
_validate_point(snake_case_ )
_validate_point(snake_case_ )
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(snake_case_ , snake_case_ ) ) )
def SCREAMING_SNAKE_CASE ( snake_case_ : list[float] ):
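# Validate that the point is a non-empty list of ints/floats, raising a descriptive error otherwise.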
if point:
if isinstance(snake_case_ , list ):
for item in point:
if not isinstance(snake_case_ , (int, float) ):
snake_case__ : Optional[int] = (
"Expected a list of numbers as input, found "
F'''{type(snake_case_ ).__name__}'''
)
raise TypeError(snake_case_ )
else:
snake_case__ : str = F'''Expected a list of numbers as input, found {type(snake_case_ ).__name__}'''
raise TypeError(snake_case_ )
else:
raise ValueError("Missing an input" )
def SCREAMING_SNAKE_CASE ( snake_case_ : list , snake_case_ : list ):
_validate_point(snake_case_ )
_validate_point(snake_case_ )
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(snake_case_ , snake_case_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 286 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class SCREAMING_SNAKE_CASE__ ( TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[Any] ):
snake_case__ : Optional[Any] = SMALL_MODEL_IDENTIFIER
snake_case__ : Any = "pt"
snake_case__ : Any = "tf"
def _lowercase ( self : Union[str, Any] , __A : List[Any] ):
snake_case__ : int = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__A )
def _lowercase ( self : Optional[int] , __A : Tuple ):
snake_case__ : List[Any] = TFAutoModel.from_pretrained(self.test_model , from_pt=__A )
model_tf.save_pretrained(__A )
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = "mock_framework"
# Framework provided - return whatever the user provides
snake_case__ : Optional[Any] = FeaturesManager.determine_framework(self.test_model , __A )
self.assertEqual(__A , __A )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__A )
snake_case__ : Optional[int] = FeaturesManager.determine_framework(__A , __A )
self.assertEqual(__A , __A )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__A )
snake_case__ : int = FeaturesManager.determine_framework(__A , __A )
self.assertEqual(__A , __A )
def _lowercase ( self : Dict ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__A )
snake_case__ : List[str] = FeaturesManager.determine_framework(__A )
self.assertEqual(__A , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__A )
snake_case__ : Tuple = FeaturesManager.determine_framework(__A )
self.assertEqual(__A , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__A ):
snake_case__ : int = FeaturesManager.determine_framework(__A )
def _lowercase ( self : Dict ):
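# TensorFlow not in environment -> use PyTorch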
snake_case__ : Dict = MagicMock(return_value=__A )
with patch("transformers.onnx.features.is_tf_available" , __A ):
snake_case__ : List[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__A , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
snake_case__ : Tuple = MagicMock(return_value=__A )
with patch("transformers.onnx.features.is_torch_available" , __A ):
snake_case__ : int = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__A , self.framework_tf )
# Both in environment -> use PyTorch
snake_case__ : Dict = MagicMock(return_value=__A )
snake_case__ : Optional[int] = MagicMock(return_value=__A )
with patch("transformers.onnx.features.is_tf_available" , __A ), patch(
"transformers.onnx.features.is_torch_available" , __A ):
snake_case__ : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__A , self.framework_pt )
# Both not in environment -> raise error
snake_case__ : List[str] = MagicMock(return_value=__A )
snake_case__ : Optional[Any] = MagicMock(return_value=__A )
with patch("transformers.onnx.features.is_tf_available" , __A ), patch(
"transformers.onnx.features.is_torch_available" , __A ):
with self.assertRaises(__A ):
snake_case__ : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
| 286 | 1 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __snake_case ( DiffusionPipeline ):
"""simple docstring"""
_lowerCamelCase = ['vqvae']
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , mel=lowerCAmelCase__ , vqvae=lowerCAmelCase__ )
def UpperCamelCase__( self ):
'''simple docstring'''
return 50 if isinstance(self.scheduler , DDIMScheduler ) else 1000
@torch.no_grad()
def __call__( self , __lowerCamelCase = 1 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase=True , ):
'''simple docstring'''
__A : Union[str, Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(lowerCAmelCase__ )
__A : Optional[int] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
__A : Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__A : List[str] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=lowerCAmelCase__ , device=self.device , )
__A : List[Any] = noise
__A : int = None
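# If reference audio is supplied, render it to a mel spectrogram image and normalize it to [-1, 1].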
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(lowerCAmelCase__ , lowerCAmelCase__ )
__A : int = self.mel.audio_slice_to_image(lowerCAmelCase__ )
__A : int = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
__A : str = (input_image / 255) * 2 - 1
__A : Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
__A : Optional[int] = self.vqvae.encode(torch.unsqueeze(lowerCAmelCase__ , 0 ) ).latent_dist.sample(
generator=lowerCAmelCase__ )[0]
__A : Dict = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
__A : List[Any] = self.scheduler.add_noise(lowerCAmelCase__ , lowerCAmelCase__ , self.scheduler.timesteps[start_step - 1] )
__A : str = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__A : Dict = int(mask_start_secs * pixels_per_second )
__A : Tuple = int(mask_end_secs * pixels_per_second )
__A : List[Any] = self.scheduler.add_noise(lowerCAmelCase__ , lowerCAmelCase__ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , UNetaDConditionModel ):
__A : Union[str, Any] = self.unet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )['sample']
else:
__A : Optional[Any] = self.unet(lowerCAmelCase__ , lowerCAmelCase__ )['sample']
if isinstance(self.scheduler , DDIMScheduler ):
__A : List[str] = self.scheduler.step(
model_output=lowerCAmelCase__ , timestep=lowerCAmelCase__ , sample=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , )['prev_sample']
else:
__A : int = self.scheduler.step(
model_output=lowerCAmelCase__ , timestep=lowerCAmelCase__ , sample=lowerCAmelCase__ , generator=lowerCAmelCase__ , )['prev_sample']
if mask is not None:
if mask_start > 0:
__A : List[Any] = mask[:, step, :, :mask_start]
if mask_end > 0:
__A : Optional[int] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
__A : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
__A : Optional[Any] = self.vqvae.decode(lowerCAmelCase__ )['sample']
__A : Dict = (images / 2 + 0.5).clamp(0 , 1 )
__A : List[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
__A : int = (images * 255).round().astype('''uint8''' )
__A : Optional[Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(lowerCAmelCase__ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
__A : Dict = [self.mel.image_to_audio(lowerCAmelCase__ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(lowerCAmelCase__ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowerCAmelCase__ ) )
@torch.no_grad()
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler , DDIMScheduler )
self.scheduler.set_timesteps(lowerCAmelCase__ )
__A : List[str] = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
__A : str = (sample / 255) * 2 - 1
__A : str = torch.Tensor(lowerCAmelCase__ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
__A : Union[str, Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
__A : Optional[Any] = self.scheduler.alphas_cumprod[t]
__A : str = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
__A : str = 1 - alpha_prod_t
__A : int = self.unet(lowerCAmelCase__ , lowerCAmelCase__ )['sample']
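# Invert the DDIM update: strip the predicted noise direction, recover the clean estimate, then re-noise at step t.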
__A : int = (1 - alpha_prod_t_prev) ** 0.5 * model_output
__A : Optional[int] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
__A : Any = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
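# Spherical linear interpolation: blend the two tensors along the great circle between them.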
__A : Any = acos(torch.dot(torch.flatten(lowerCAmelCase__ ) , torch.flatten(lowerCAmelCase__ ) ) / torch.norm(lowerCAmelCase__ ) / torch.norm(lowerCAmelCase__ ) )
return sin((1 - alpha) * theta ) * xa / sin(lowerCAmelCase__ ) + sin(alpha * theta ) * xa / sin(lowerCAmelCase__ )
| 179 |
def snake_case_ ( snake_case ) -> int:
if n == 1 or not isinstance(snake_case , int ):
return 0
elif n == 2:
return 1
else:
lowercase__: Optional[Any] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def snake_case_ ( snake_case ) -> int:
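# Return the index of the first Fibonacci number with n digits.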
lowercase__: int = 0
lowercase__: int = 2
while digits < n:
index += 1
lowercase__: Tuple = len(str(fibonacci(snake_case ) ) )
return index
def snake_case_ ( snake_case = 10_00 ) -> int:
return fibonacci_digits_index(snake_case )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 196 | 0 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def a__ ( lowercase : Optional[Any], lowercase : Any, lowercase : List[str]=None ) -> List[Any]:
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, F"""{torch_layer} layer.weight does not match"""
_UpperCamelCase = nn.Parameter(lowercase )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F"""{torch_layer} layer.bias does not match"""
_UpperCamelCase = nn.Parameter(lowercase )
def a__ ( lowercase : Union[str, Any], lowercase : List[Any], lowercase : Dict ) -> Dict:
"""simple docstring"""
_UpperCamelCase = np.asarray(weights[0] )
_UpperCamelCase = np.asarray(weights[1] )
_UpperCamelCase = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key, torch.tensor(lowercase ).transpose(1, 2 ).contiguous().view(-1, lowercase ), )
set_param(
torch_layer.self_attention.value, torch.tensor(lowercase ).transpose(1, 2 ).contiguous().view(-1, lowercase ), )
set_param(
torch_layer.output.dense, torch.tensor(lowercase ).view(-1, lowercase ).contiguous().transpose(0, 1 ), )
def a__ ( lowercase : str, lowercase : Tuple, lowercase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = np.asarray(weights[0] )
_UpperCamelCase = np.asarray(weights[1] )
_UpperCamelCase = np.asarray(weights[2] )
_UpperCamelCase = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query, torch.tensor(lowercase ).transpose(1, 2 ).contiguous().view(-1, lowercase ), )
set_param(
torch_layer.self_attention.key, torch.tensor(lowercase ).transpose(1, 2 ).contiguous().view(-1, lowercase ), )
set_param(
torch_layer.self_attention.value, torch.tensor(lowercase ).transpose(1, 2 ).contiguous().view(-1, lowercase ), )
set_param(
torch_layer.output.dense, torch.tensor(lowercase ).view(-1, lowercase ).contiguous().transpose(0, 1 ), )
def a__ ( lowercase : Tuple, lowercase : Union[str, Any], lowercase : Optional[int] ) -> str:
"""simple docstring"""
_UpperCamelCase = weights[0][0][0]
_UpperCamelCase = np.asarray(layer_norm_a[0] )
_UpperCamelCase = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm, torch.tensor(lowercase ), torch.tensor(lowercase ), )
# lsh weights + output
_UpperCamelCase = weights[0][1]
if len(lowercase ) < 4:
set_layer_weights_in_torch_lsh(lowercase, torch_block.attention, lowercase )
else:
set_layer_weights_in_torch_local(lowercase, torch_block.attention, lowercase )
# intermediate weighs
_UpperCamelCase = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowercase ) == 4:
_UpperCamelCase = intermediate_weights[2]
# layernorm 2
_UpperCamelCase = np.asarray(intermediate_weights[0][0] )
_UpperCamelCase = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm, torch.tensor(lowercase ), torch.tensor(lowercase ), )
# intermediate dense
_UpperCamelCase = np.asarray(intermediate_weights[1][0] )
_UpperCamelCase = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense, torch.tensor(lowercase ).transpose(0, 1 ).contiguous(), torch.tensor(lowercase ), )
# intermediate out
_UpperCamelCase = np.asarray(intermediate_weights[4][0] )
_UpperCamelCase = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense, torch.tensor(lowercase ).transpose(0, 1 ).contiguous(), torch.tensor(lowercase ), )
def a__ ( lowercase : Any, lowercase : Optional[Any], lowercase : Optional[int] ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = torch_model.reformer
# word embeds
_UpperCamelCase = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings, torch.tensor(lowercase ), )
if isinstance(weights[3], tuple ):
_UpperCamelCase = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_UpperCamelCase = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F"""{position_embeddings[emb_idx]} emb does not match"""
_UpperCamelCase = nn.Parameter(torch.tensor(lowercase ) )
_UpperCamelCase = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
lowercase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_UpperCamelCase = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(lowercase, lowercase, lowercase )
# output layer norm
_UpperCamelCase = np.asarray(weights[7][0] )
_UpperCamelCase = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm, torch.tensor(lowercase ), torch.tensor(lowercase ), )
# output embeddings
_UpperCamelCase = np.asarray(weights[9][0] )
_UpperCamelCase = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder, torch.tensor(lowercase ).transpose(0, 1 ).contiguous(), torch.tensor(lowercase ), )
def a__ ( lowercase : Dict, lowercase : int, lowercase : Dict ) -> int:
"""simple docstring"""
_UpperCamelCase = ReformerConfig.from_json_file(lowercase )
print(F"""Building PyTorch model from configuration: {config}""" )
_UpperCamelCase = ReformerModelWithLMHead(lowercase )
with open(lowercase, '''rb''' ) as f:
_UpperCamelCase = pickle.load(lowercase )['''weights''']
set_model_weights_in_torch(lowercase, lowercase, config.hidden_size )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict(), lowercase )
if __name__ == "__main__":
lowercase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase__ : Union[str, Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 366 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
lowercase__ : List[Any] = parse(importlib.metadata.version('torch'))
def a__ ( lowercase : Union[str, Version], lowercase : str, lowercase : str ) -> List[str]:
"""simple docstring"""
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
_UpperCamelCase = STR_OPERATION_TO_FUNC[operation]
if isinstance(lowercase, lowercase ):
_UpperCamelCase = parse(importlib.metadata.version(lowercase ) )
return operation(lowercase, parse(lowercase ) )
def a__ ( lowercase : str, lowercase : str ) -> List[Any]:
"""simple docstring"""
return compare_versions(lowercase, lowercase, lowercase )
| 287 | 0 |