"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ["""keras_nlp"""]
def __init__( self , *__lowerCamelCase , **__lowerCamelCase ):
'''simple docstring'''
requires_backends(self , ['''keras_nlp'''] )
"""simple docstring"""
def __lowercase ( snake_case_ : int ,snake_case_ : list ) ->Any:
'''simple docstring'''
_enforce_args(snake_case_ ,snake_case_ )
if n == 0:
return 0
__A : int = float('''-inf''' )
for i in range(1 ,n + 1 ):
__A : Union[str, Any] = max(
snake_case_ ,prices[i - 1] + naive_cut_rod_recursive(n - i ,snake_case_ ) )
return max_revue
def __lowercase ( snake_case_ : int ,snake_case_ : list ) ->int:
'''simple docstring'''
_enforce_args(snake_case_ ,snake_case_ )
__A : Dict = [float('''-inf''' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(snake_case_ ,snake_case_ ,snake_case_ )
def __lowercase ( snake_case_ : int ,snake_case_ : list ,snake_case_ : list ) ->Any:
'''simple docstring'''
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
__A : Any = float('''-inf''' )
for i in range(1 ,n + 1 ):
__A : Union[str, Any] = max(
snake_case_ ,prices[i - 1] + _top_down_cut_rod_recursive(n - i ,snake_case_ ,snake_case_ ) ,)
__A : Any = max_revenue
return max_rev[n]
def __lowercase ( snake_case_ : int ,snake_case_ : list ) ->Any:
'''simple docstring'''
_enforce_args(snake_case_ ,snake_case_ )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
__A : Union[str, Any] = [float('''-inf''' ) for _ in range(n + 1 )]
__A : List[Any] = 0
for i in range(1 ,n + 1 ):
__A : Union[str, Any] = max_rev[i]
for j in range(1 ,i + 1 ):
__A : str = max(snake_case_ ,prices[j - 1] + max_rev[i - j] )
__A : List[str] = max_revenue_i
return max_rev[n]
def __lowercase ( snake_case_ : int ,snake_case_ : list ) ->Union[str, Any]:
'''simple docstring'''
if n < 0:
__A : Union[str, Any] = F"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(snake_case_ )
if n > len(snake_case_ ):
__A : List[Any] = (
'''Each integral piece of rod must have a corresponding price. '''
F"""Got n = {n} but length of prices = {len(snake_case_ )}"""
)
raise ValueError(snake_case_ )
def __lowercase ( ) ->str:
'''simple docstring'''
__A : Any = [6, 10, 12, 15, 20, 23]
__A : Union[str, Any] = len(snake_case_ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
__A : str = 36
__A : Any = top_down_cut_rod(snake_case_ ,snake_case_ )
__A : Any = bottom_up_cut_rod(snake_case_ ,snake_case_ )
__A : Optional[Any] = naive_cut_rod_recursive(snake_case_ ,snake_case_ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
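# A quick demonstration (an illustrative sketch, not part of the original module):
# all three strategies agree on the classic CLRS price table, where the optimal
# cut for a rod of length 4 is two pieces of length 2 (5 + 5 = 10).
if __name__ == "__main__":
    clrs_prices = [1, 5, 8, 9]  # price of a piece of length 1, 2, 3, 4
    assert bottom_up_cut_rod(4, clrs_prices) == 10
    assert top_down_cut_rod(4, clrs_prices) == 10
    assert naive_cut_rod_recursive(4, clrs_prices) == 10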
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
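# How the `_LazyModule` pattern above behaves in practice: the module object is
# swapped for a proxy, so `SEWModel` (and the heavy torch import behind it) is
# only materialized on first attribute access.
#
#   import transformers.models.sew as sew   # cheap: nothing heavy imported yet
#   model_cls = sew.SEWModel                 # now modeling_sew (and torch) load
#
# A minimal stand-alone imitation of the idea (an illustrative sketch using only
# the stdlib, not transformers' actual implementation):
import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve submodule attributes on first access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # maps exported attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ isn't hit again
        return value


# A package would activate it with: sys.modules[__name__] = LazyModule(__name__, _import_structure)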
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections import deque
def lowercase (SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
SCREAMING_SNAKE_CASE = len(__lowerCamelCase )
SCREAMING_SNAKE_CASE = deque()
SCREAMING_SNAKE_CASE = [False for _ in range(__lowerCamelCase )]
SCREAMING_SNAKE_CASE = [-1 for _ in range(__lowerCamelCase )]
SCREAMING_SNAKE_CASE = index_of[:]
def strong_connect(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
SCREAMING_SNAKE_CASE = index # the number when this node is seen
SCREAMING_SNAKE_CASE = index # lowest rank node reachable from here
index += 1
stack.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE = True
for w in g[v]:
if index_of[w] == -1:
SCREAMING_SNAKE_CASE = strong_connect(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
SCREAMING_SNAKE_CASE = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = stack.pop()
SCREAMING_SNAKE_CASE = False
component.append(__lowerCamelCase )
while w != v:
SCREAMING_SNAKE_CASE = stack.pop()
SCREAMING_SNAKE_CASE = False
component.append(__lowerCamelCase )
components.append(__lowerCamelCase )
return index
SCREAMING_SNAKE_CASE = []
for v in range(__lowerCamelCase ):
if index_of[v] == -1:
strong_connect(__lowerCamelCase , 0 , __lowerCamelCase )
return components
def lowercase (SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE = [[] for _ in range(__lowerCamelCase )]
for u, v in edges:
g[u].append(__lowerCamelCase )
return g
if __name__ == "__main__":
# Test
__UpperCamelCase = 7
__UpperCamelCase = [0, 0, 1, 2, 3, 3, 4, 4, 6]
__UpperCamelCase = [1, 3, 2, 0, 1, 4, 5, 6, 5]
__UpperCamelCase = [(u, v) for u, v in zip(source, target)]
__UpperCamelCase = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
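# Another small usage sketch (illustrative, not from the original file): two
# 2-cycles, 0<->1 and 2<->3, joined by the bridge 1 -> 2, yield exactly two
# components, emitted in reverse topological order of the condensation.
if __name__ == "__main__":
    example = create_graph(4, [(0, 1), (1, 0), (1, 2), (2, 3), (3, 2)])
    assert tarjan(example) == [[3, 2], [1, 0]]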
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # record only leaf modules (convs/batchnorms count even when they wrap params)
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by tracing the
        operations both modules perform on the input `x` and copying state
        dicts pairwise.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)

    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
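# Example invocation (illustrative; the script filename is hypothetical, the
# flags are the ones defined above):
#
#   python convert_resnet_to_pytorch.py \
#       --model_name resnet50 \
#       --pytorch_dump_folder_path ./converted-resnet
#
# Omitting --model_name converts every architecture in `names_to_config`. Note
# that `type=bool` on --push_to_hub means any non-empty string (even "False")
# parses as True, a standard argparse pitfall.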
DIGITS_SQUARED = [sum(int(c) ** 2 for c in str(i)) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of `number`."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends in 1
CHAINS[57] = False  # the chain starting at 58 ends in 89


def chain(number: int) -> bool:
    """Return True if the digit-square chain starting at `number` ends in 1."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    # every number of the form `number * 10^k` has the same chain tail
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count starting values below `number` whose chain ends in 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""") | 371 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType

logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
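# Usage sketch (illustrative): the config can be built with defaults or
# overrides, like any other `PretrainedConfig` subclass.
#
#   config = LayoutLMv3Config(hidden_size=384, num_hidden_layers=6)
#   config.model_type                    # "layoutlmv3"
#   config.max_2d_position_embeddings    # 1024 (default)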
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]

    return values
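# Usage sketch (illustrative; the kwarg name and version string are made up):
#
#   def sample_fn(**kwargs):
#       old_value = deprecate(
#           ("my_old_kwarg", "1.0.0", "Please use `my_new_kwarg` instead."),
#           take_from=kwargs,
#       )
#       return old_value
#
#   sample_fn(my_old_kwarg=3)   # emits a FutureWarning and returns 3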
import warnings

from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401


warnings.warn(
    "The `inpainting.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionInpaintPipeline` instead."
)
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__UpperCamelCase : int = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] ):
if isinstance(_a , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_a , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_a ):
return [[videos]]
raise ValueError(F'Could not make batched video from {videos}' )
class a ( a__ ):
snake_case__ = ['''pixel_values''']
def __init__( self , _snake_case = True , _snake_case = None , _snake_case = PILImageResampling.BILINEAR , _snake_case = True , _snake_case = None , _snake_case = True , _snake_case = 1 / 2_55 , _snake_case = True , _snake_case = True , _snake_case = None , _snake_case = None , **_snake_case , ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
lowerCAmelCase = size if size is not None else {'shortest_edge': 2_56}
lowerCAmelCase = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
lowerCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
lowerCAmelCase = get_size_dict(lowerCamelCase_ , param_name='crop_size' )
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
lowerCAmelCase = resample
lowerCAmelCase = do_rescale
lowerCAmelCase = rescale_factor
lowerCAmelCase = offset
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = PILImageResampling.BILINEAR , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
if "shortest_edge" in size:
lowerCAmelCase = get_resize_output_image_size(lowerCamelCase_ , size['shortest_edge'] , default_to_square=lowerCamelCase_ )
elif "height" in size and "width" in size:
lowerCAmelCase = (size['height'], size['width'])
else:
raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(lowerCamelCase_ , size=(size['height'], size['width']) , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = True , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = image.astype(np.floataa )
if offset:
lowerCAmelCase = image - (scale / 2)
return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case = None , **_snake_case , ):
"""simple docstring"""
return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def UpperCamelCase__ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = ChannelDimension.FIRST , ):
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
lowerCAmelCase = to_numpy_array(lowerCamelCase_ )
if do_resize:
lowerCAmelCase = self.resize(image=lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ )
if do_center_crop:
lowerCAmelCase = self.center_crop(lowerCamelCase_ , size=lowerCamelCase_ )
if do_rescale:
lowerCAmelCase = self.rescale(image=lowerCamelCase_ , scale=lowerCamelCase_ , offset=lowerCamelCase_ )
if do_normalize:
lowerCAmelCase = self.normalize(image=lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ )
lowerCAmelCase = to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_ )
return image
def UpperCamelCase__ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = ChannelDimension.FIRST , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase = resample if resample is not None else self.resample
lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase = offset if offset is not None else self.offset
lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase = image_std if image_std is not None else self.image_std
lowerCAmelCase = size if size is not None else self.size
lowerCAmelCase = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase = get_size_dict(lowerCamelCase_ , param_name='crop_size' )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
lowerCAmelCase = make_batched(lowerCamelCase_ )
lowerCAmelCase = [
[
self._preprocess_image(
image=lowerCamelCase_ , do_resize=lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ , do_center_crop=lowerCamelCase_ , crop_size=lowerCamelCase_ , do_rescale=lowerCamelCase_ , rescale_factor=lowerCamelCase_ , offset=lowerCamelCase_ , do_normalize=lowerCamelCase_ , image_mean=lowerCamelCase_ , image_std=lowerCamelCase_ , data_format=lowerCamelCase_ , )
for img in video
]
for video in videos
]
lowerCAmelCase = {'pixel_values': videos}
return BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ )
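# Usage sketch (illustrative; `VideoImageProcessor` is the placeholder name used above):
#
#   import numpy as np
#
#   frames = [np.random.randint(0, 256, (3, 300, 400), dtype=np.uint8) for _ in range(8)]
#   processor = VideoImageProcessor()
#   batch = processor(frames, return_tensors="np")
#   batch["pixel_values"].shape   # (1, 8, 3, 224, 224) after resize + center crop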
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Any = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ['''LayoutLMv2FeatureExtractor''']
__UpperCamelCase : Optional[int] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
# NOTE: the original constant names were lost to obfuscation; the distinct
# placeholder names below at least keep all eight precomputed timestep
# schedules addressable (rebinding a single name, as the obfuscated source
# did, would discard every list but the last).
TIMESTEP_SCHEDULE_A = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
TIMESTEP_SCHEDULE_B = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
TIMESTEP_SCHEDULE_C = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
TIMESTEP_SCHEDULE_D = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
TIMESTEP_SCHEDULE_E = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
TIMESTEP_SCHEDULE_F = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
TIMESTEP_SCHEDULE_G = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
TIMESTEP_SCHEDULE_H = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is, as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """
    A helper class to tee print's output into a file.
    Usage:
    sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))


def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string that can be replayed nicely, wrapped to `max_width` chars.
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)


def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)


def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable the `if 0` branch to debug everything but the run itself, quickly and with fake metrics
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}


def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}


def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""


def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd",
        default=None,
        type=str,
        required=True,
        help="Base cmd",
    )
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
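# How the cartesian product of --variations expands (illustrative sketch):
#
#   import itertools, re
#   dims = [list(map(str.strip, re.split(r"\|", x)))
#           for x in ["--tf32 0|--tf32 1", "|--fp16|--bf16"]]
#   list(map(str.strip, map(" ".join, itertools.product(*dims))))
#   # ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#   #  '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']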
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
class UpperCamelCase ( enum.Enum ):
lowerCAmelCase : int = 0
lowerCAmelCase : Optional[int] = 1
@add_end_docstrings(_UpperCAmelCase )
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : Dict = """generated"""
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __A ( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , **UpperCAmelCase__ , ):
A__ = {}
if truncation is not None:
A__ = truncation
A__ = generate_kwargs
A__ = {}
if return_tensors is not None and return_type is None:
A__ = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
A__ = return_type
if clean_up_tokenization_spaces is not None:
A__ = clean_up_tokenization_spaces
if stop_sequence is not None:
A__ = self.tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
A__ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
return True
def __A ( self , *UpperCAmelCase__ , UpperCAmelCase__ ):
A__ = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0] , UpperCAmelCase__ ):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" )
A__ = ([prefix + arg for arg in args[0]],)
A__ = True
elif isinstance(args[0] , UpperCAmelCase__ ):
A__ = (prefix + args[0],)
A__ = False
else:
raise ValueError(
F""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
A__ = self.tokenizer(*UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
A__ = super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ )
if (
isinstance(args[0] , UpperCAmelCase__ )
and all(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for el in args[0] )
and all(len(UpperCAmelCase__ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__=TruncationStrategy.DO_NOT_TRUNCATE , **UpperCAmelCase__ ):
A__ = self._parse_and_tokenize(UpperCAmelCase__ , truncation=UpperCAmelCase__ , **UpperCAmelCase__ )
return inputs
def __A ( self , UpperCAmelCase__ , **UpperCAmelCase__ ):
if self.framework == "pt":
A__ , A__ = model_inputs["input_ids"].shape
elif self.framework == "tf":
A__ , A__ = tf.shape(model_inputs["input_ids"] ).numpy()
A__ = generate_kwargs.get("min_length" , self.model.config.min_length )
A__ = generate_kwargs.get("max_length" , self.model.config.max_length )
self.check_inputs(UpperCAmelCase__ , generate_kwargs["min_length"] , generate_kwargs["max_length"] )
A__ = self.model.generate(**UpperCAmelCase__ , **UpperCAmelCase__ )
A__ = output_ids.shape[0]
if self.framework == "pt":
A__ = output_ids.reshape(UpperCAmelCase__ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
A__ = tf.reshape(UpperCAmelCase__ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__=ReturnType.TEXT , UpperCAmelCase__=False ):
A__ = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
A__ = {F"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
A__ = {
F"""{self.return_name}_text""": self.tokenizer.decode(
UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ , )
}
records.append(UpperCAmelCase__ )
return records
@add_end_docstrings(_UpperCAmelCase )
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : Union[str, Any] = """summary"""
def __call__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
if max_length < min_length:
logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"a summarization task, where outputs shorter than the input are typically wanted, you might "
F"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_UpperCAmelCase )
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : int = """translation"""
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
if input_length > 0.9 * max_length:
logger.warning(
F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"increasing your max_length manually, e.g. translator('...', max_length=400)" )
return True
def __A ( self , *UpperCAmelCase__ , UpperCAmelCase__=TruncationStrategy.DO_NOT_TRUNCATE , UpperCAmelCase__=None , UpperCAmelCase__=None ):
if getattr(self.tokenizer , "_build_translation_inputs" , UpperCAmelCase__ ):
return self.tokenizer._build_translation_inputs(
*UpperCAmelCase__ , return_tensors=self.framework , truncation=UpperCAmelCase__ , src_lang=UpperCAmelCase__ , tgt_lang=UpperCAmelCase__ )
else:
return super()._parse_and_tokenize(*UpperCAmelCase__ , truncation=UpperCAmelCase__ )
def __A ( self , UpperCAmelCase__=None , UpperCAmelCase__=None , **UpperCAmelCase__ ):
A__ , A__ , A__ = super()._sanitize_parameters(**UpperCAmelCase__ )
if src_lang is not None:
A__ = src_lang
if tgt_lang is not None:
A__ = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
A__ = kwargs.get("task" , self.task )
A__ = task.split("_" )
if task and len(UpperCAmelCase__ ) == 4:
# translation, XX, to YY
A__ = items[1]
A__ = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 355 |
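# Editor's note: a minimal usage sketch for the summarization and translation
# pipelines defined in the sample above. The `transformers.pipeline` factory is
# the documented entry point; the checkpoint names are assumptions, not taken
# from the sample itself.
from transformers import pipeline

# Summarization returns a list of dicts keyed by "summary_text".
summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
print(summarizer("The tower is 324 metres tall, about the same height as an 81-storey building.", max_length=20, min_length=5)[0]["summary_text"])

# The translation task string encodes source and target language ("translation_XX_to_YY"),
# which is exactly what the `task.split("_")` branch above parses back out.
translator = pipeline("translation_en_to_fr", model="t5-small")
print(translator("How are you?", max_length=40)[0]["translation_text"])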
import argparse
import struct
import unittest
class UpperCamelCase :
def __init__( self , UpperCAmelCase__ ):
A__ = data
# Initialize hash values
A__ = [
0x6A_09E_667,
0xBB_67A_E85,
0x3C_6EF_372,
0xA5_4FF_53A,
0x51_0E5_27F,
0x9B_056_88C,
0x1F_83D_9AB,
0x5B_E0C_D19,
]
# Initialize round constants
A__ = [
0x42_8A2_F98,
0x71_374_491,
0xB5_C0F_BCF,
0xE9_B5D_BA5,
0x39_56C_25B,
0x59_F11_1F1,
0x92_3F8_2A4,
0xAB_1C5_ED5,
0xD8_07A_A98,
0x12_835_B01,
0x24_318_5BE,
0x55_0C7_DC3,
0x72_BE5_D74,
0x80_DEB_1FE,
0x9B_DC0_6A7,
0xC1_9BF_174,
0xE4_9B6_9C1,
0xEF_BE4_786,
0x0F_C19_DC6,
0x24_0CA_1CC,
0x2D_E92_C6F,
0x4A_748_4AA,
0x5C_B0A_9DC,
0x76_F98_8DA,
0x98_3E5_152,
0xA8_31C_66D,
0xB0_032_7C8,
0xBF_597_FC7,
0xC6_E00_BF3,
0xD5_A79_147,
0x06_CA6_351,
0x14_292_967,
0x27_B70_A85,
0x2E_1B2_138,
0x4D_2C6_DFC,
0x53_380_D13,
0x65_0A7_354,
0x76_6A0_ABB,
0x81_C2C_92E,
0x92_722_C85,
0xA2_BFE_8A1,
0xA8_1A6_64B,
0xC2_4B8_B70,
0xC7_6C5_1A3,
0xD1_92E_819,
0xD6_990_624,
0xF4_0E3_585,
0x10_6AA_070,
0x19_A4C_116,
0x1E_376_C08,
0x27_487_74C,
0x34_B0B_CB5,
0x39_1C0_CB3,
0x4E_D8A_A4A,
0x5B_9CC_A4F,
0x68_2E6_FF3,
0x74_8F8_2EE,
0x78_A56_36F,
0x84_C87_814,
0x8C_C70_208,
0x90_BEF_FFA,
0xA4_506_CEB,
0xBE_F9A_3F7,
0xC6_717_8F2,
]
A__ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __A ( UpperCAmelCase__ ):
A__ = b"\x80" + (b"\x00" * (63 - (len(UpperCAmelCase__ ) + 8) % 64))
A__ = struct.pack(">Q" , (len(UpperCAmelCase__ ) * 8) )
return data + padding + big_endian_integer
def __A ( self ):
# Convert into blocks of 64 bytes
A__ = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
A__ = list(struct.unpack(">16L" , UpperCAmelCase__ ) )
# add 48 0-ed integers
words += [0] * 48
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
A__ = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
A__ = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
A__ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x100_000_000
# Compression
A__ = self.ror(UpperCAmelCase__ , 6 ) ^ self.ror(UpperCAmelCase__ , 11 ) ^ self.ror(UpperCAmelCase__ , 25 )
A__ = (e & f) ^ ((~e & 0xFF_FFF_FFF) & g)
A__ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x100_000_000
A__ = self.ror(UpperCAmelCase__ , 2 ) ^ self.ror(UpperCAmelCase__ , 13 ) ^ self.ror(UpperCAmelCase__ , 22 )
A__ = (a & b) ^ (a & c) ^ (b & c)
A__ = (sa + maj) % 0x100_000_000
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = (
g,
f,
e,
((d + tempa) % 0x100_000_000),
c,
b,
a,
((tempa + tempa) % 0x100_000_000),
)
A__ = [a, b, c, d, e, f, g, h]
# Modify final values
A__ = [
((element + mutated_hash_values[index]) % 0x100_000_000)
for index, element in enumerate(self.hashes )
]
A__ = "".join([hex(UpperCAmelCase__ )[2:].zfill(8 ) for value in self.hashes] )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ ):
return 0xFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
class UpperCamelCase ( unittest.TestCase ):
def __A ( self ):
import hashlib
A__ = bytes("Test String" , "utf-8" )
self.assertEqual(SHAaaa(UpperCAmelCase__ ).hash , hashlib.shaaaa(UpperCAmelCase__ ).hexdigest() )
def UpperCamelCase ( )-> None:
"""simple docstring"""
import doctest
doctest.testmod()
A__ = argparse.ArgumentParser()
parser.add_argument(
"-s" , "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
parser.add_argument(
"-f" , "--file" , dest="input_file" , help="Hash contents of a file" )
A__ = parser.parse_args()
A__ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , "rb" ) as f:
A__ = f.read()
else:
A__ = bytes(_A , "utf-8" )
print(SHAaaa(_A ).hash )
if __name__ == "__main__":
main()
| 198 | 0 |
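# Editor's note: the class above is SHA-256 with masked identifiers ("SHAaaa"
# stands in for SHA256 and "hashlib.shaaaa" for hashlib.sha256). A standalone
# sketch of its 32-bit right-rotate primitive, plus the hashlib reference digest
# the unit test compares against:
import hashlib


def rotr32(value: int, rotations: int) -> int:
    # 32-bit right rotation, mirroring the `ror` method above.
    return 0xFFFFFFFF & ((value << (32 - rotations)) | (value >> rotations))


assert rotr32(0x00000001, 1) == 0x80000000
print(hashlib.sha256(b"Test String").hexdigest())  # reference value for the test above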
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
_snake_case = re.compile(r'''\s+''')
def _UpperCamelCase ( snake_case__ ) -> Dict:
return {"hash": hashlib.mda(re.sub(_snake_case, "", example["content"] ).encode("utf-8" ) ).hexdigest()}
def _UpperCamelCase ( snake_case__ ) -> List[str]:
__UpperCAmelCase : str = [len(_snake_case ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_snake_case ), "line_max": max(_snake_case )}
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : List[Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Optional[Any]:
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def _UpperCamelCase ( snake_case__, snake_case__=5 ) -> Any:
__UpperCAmelCase : List[Any] = ["auto-generated", "autogenerated", "automatically generated"]
__UpperCAmelCase : Optional[int] = example["content"].splitlines()
for _, line in zip(range(_snake_case ), _snake_case ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def _UpperCamelCase ( snake_case__, snake_case__=5, snake_case__=0.05 ) -> Tuple:
__UpperCAmelCase : List[Any] = ["unit tests", "test file", "configuration file"]
__UpperCAmelCase : Any = example["content"].splitlines()
__UpperCAmelCase : Optional[Any] = 0
__UpperCAmelCase : Tuple = 0
# first test
for _, line in zip(range(_snake_case ), _snake_case ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
__UpperCAmelCase : Union[str, Any] = example["content"].count("\n" )
__UpperCAmelCase : int = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def _UpperCamelCase ( snake_case__ ) -> Optional[Any]:
__UpperCAmelCase : Optional[Any] = ["def ", "class ", "for ", "while "]
__UpperCAmelCase : Optional[Any] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def _UpperCamelCase ( snake_case__, snake_case__=4 ) -> Union[str, Any]:
__UpperCAmelCase : Any = example["content"].splitlines()
__UpperCAmelCase : Dict = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def _UpperCamelCase ( snake_case__ ) -> List[Any]:
__UpperCAmelCase : Any = tokenizer(example["content"], truncation=_snake_case )["input_ids"]
__UpperCAmelCase : List[Any] = len(example["content"] ) / len(_snake_case )
return {"ratio": ratio}
def _UpperCamelCase ( snake_case__ ) -> str:
__UpperCAmelCase : str = {}
results.update(get_hash(_snake_case ) )
results.update(line_stats(_snake_case ) )
results.update(alpha_stats(_snake_case ) )
results.update(char_token_ratio(_snake_case ) )
results.update(is_autogenerated(_snake_case ) )
results.update(is_config_or_test(_snake_case ) )
results.update(has_no_keywords(_snake_case ) )
results.update(has_few_assignments(_snake_case ) )
return results
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]:
if not check_uniques(_snake_case, _snake_case ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def _UpperCamelCase ( snake_case__ ) -> List[Any]:
with open(_snake_case, "rb" ) as f_in:
with gzip.open(str(_snake_case ) + ".gz", "wb", compresslevel=6 ) as f_out:
shutil.copyfileobj(_snake_case, _snake_case )
os.unlink(_snake_case )
# Settings
_snake_case = HfArgumentParser(PreprocessingArguments)
_snake_case = parser.parse_args()
if args.num_workers is None:
_snake_case = multiprocessing.cpu_count()
_snake_case = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
_snake_case = time.time()
_snake_case = load_dataset(args.dataset_name, split='''train''')
print(F'Time to load dataset: {time.time()-t_start:.2f}')
# Run preprocessing
_snake_case = time.time()
_snake_case = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')
# Deduplicate hashes
_snake_case = set(ds.unique('''hash'''))
_snake_case = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')
# Deduplicate data and apply heuristics
_snake_case = time.time()
_snake_case = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
_snake_case = time.time()
_snake_case = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
    print(F'Size of deduplicated dataset: {len(ds_filter)}')
# Save data in batches of samples_per_file
_snake_case = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
_snake_case = output_dir / "data"
data_dir.mkdir(exist_ok=True)
_snake_case = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
_snake_case = str(data_dir / F'file-{file_number+1:012}.json')
_snake_case = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}')
| 157 |
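# Editor's note: a compact sketch of the exact-duplicate filter the script above
# applies — hash the whitespace-stripped content (the script masks `hashlib.md5`
# as `hashlib.mda`) and keep only the first occurrence of each hash.
import hashlib
import re

WHITESPACE = re.compile(r"\s+")


def content_hash(text: str) -> str:
    return hashlib.md5(WHITESPACE.sub("", text).encode("utf-8")).hexdigest()


samples = ["def f():\n    return 1", "def f():  \n\treturn 1", "def g(): pass"]
seen, unique = set(), []
for sample in samples:
    digest = content_hash(sample)
    if digest not in seen:
        seen.add(digest)
        unique.append(sample)
assert len(unique) == 2  # the two whitespace variants collapse into one entry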
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case : int = logging.get_logger(__name__)
snake_case : List[str] = {"vocab_file": "spiece.model"}
snake_case : List[str] = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
snake_case : Tuple = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
snake_case : List[str] = "▁"
class _snake_case ( snake_case ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _a , _a=True , _a=True , _a=False , _a="[CLS]" , _a="[SEP]" , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , _a = None , **_a , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__magic_name__ : str = (
AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
if isinstance(_a , _a )
else mask_token
)
__magic_name__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
__magic_name__ : Dict = do_lower_case
__magic_name__ : Tuple = remove_space
__magic_name__ : Union[str, Any] = keep_accents
__magic_name__ : Tuple = vocab_file
__magic_name__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def SCREAMING_SNAKE_CASE ( self ):
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[str] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__magic_name__ : List[str] = self.__dict__.copy()
__magic_name__ : Any = None
return state
def __setstate__( self , _a ):
__magic_name__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__magic_name__ : str = {}
__magic_name__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self , _a ):
if self.remove_space:
__magic_name__ : List[Any] = " ".join(inputs.strip().split() )
else:
__magic_name__ : str = inputs
__magic_name__ : int = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
__magic_name__ : str = unicodedata.normalize("NFKD" , _a )
__magic_name__ : Tuple = "".join([c for c in outputs if not unicodedata.combining(_a )] )
if self.do_lower_case:
__magic_name__ : int = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Optional[Any] = self.preprocess_text(_a )
__magic_name__ : Dict = self.sp_model.encode(_a , out_type=_a )
__magic_name__ : Any = []
for piece in pieces:
if len(_a ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
__magic_name__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__magic_name__ : List[str] = cur_pieces[1:]
else:
__magic_name__ : Optional[int] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_a )
else:
new_pieces.append(_a )
return new_pieces
def SCREAMING_SNAKE_CASE ( self , _a ):
return self.sp_model.PieceToId(_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
return self.sp_model.IdToPiece(_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Any = []
__magic_name__ : Union[str, Any] = ""
__magic_name__ : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
__magic_name__ : List[Any] = True
__magic_name__ : Optional[int] = []
else:
current_sub_tokens.append(_a )
__magic_name__ : Optional[Any] = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
__magic_name__ : List[str] = [self.sep_token_id]
__magic_name__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , _a , _a = None , _a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is not None:
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1]
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
__magic_name__ : Optional[int] = [self.sep_token_id]
__magic_name__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__magic_name__ : List[str] = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , "wb" ) as fi:
__magic_name__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 281 | 0 |
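# Editor's note: a round-trip sketch for the SentencePiece-backed ALBERT tokenizer
# defined above, assuming network access to the public "albert-base-v2" checkpoint
# (an assumption, not part of the file itself).
from transformers import AlbertTokenizer

tok = AlbertTokenizer.from_pretrained("albert-base-v2")
pieces = tok.tokenize("SentencePiece splits text into subword pieces.")
ids = tok.convert_tokens_to_ids(pieces)
assert tok.convert_ids_to_tokens(ids) == pieces  # pieces and ids round-trip
print(pieces)  # subword pieces prefixed with the "▁" word-boundary marker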
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase : List[Any] = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 45 |
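# Editor's note: the init above defers heavy imports through `_LazyModule`. A
# minimal sketch of the same idea using PEP 562 module-level __getattr__, meant
# to live in a package __init__.py — the submodule layout here is hypothetical.
import importlib

_LAZY_ATTRS = {"BioGptModel": "modeling_biogpt"}  # attribute -> submodule (assumed)


def __getattr__(name):
    if name in _LAZY_ATTRS:
        # Import the submodule only on first attribute access.
        module = importlib.import_module(f".{_LAZY_ATTRS[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")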
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : List[str] = logging.get_logger()
@dataclass
class lowerCAmelCase :
UpperCAmelCase__ = 42
UpperCAmelCase__ = field(default_factory=__UpperCamelCase )
UpperCAmelCase__ = field(default_factory=__UpperCamelCase )
def A_ ( self : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tensor , UpperCAmelCase : Tensor ) -> Any:
lowerCamelCase__ : List[str] = len(list(m.modules() ) ) == 1 or isinstance(UpperCAmelCase , nn.Convad ) or isinstance(UpperCAmelCase , nn.BatchNormad )
if has_not_submodules:
self.traced.append(UpperCAmelCase )
def __call__( self : Any , UpperCAmelCase : Tensor ) -> Dict:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCAmelCase )
[x.remove() for x in self.handles]
return self
@property
def A_ ( self : List[str] ) -> int:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda UpperCAmelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class lowerCAmelCase :
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 0
UpperCAmelCase__ = field(default_factory=__UpperCamelCase )
UpperCAmelCase__ = field(default_factory=__UpperCamelCase )
def __call__( self : Any , UpperCAmelCase : Tensor ) -> int:
lowerCamelCase__ : Union[str, Any] = Tracker(self.dest )(UpperCAmelCase ).parametrized
lowerCamelCase__ : List[Any] = Tracker(self.src )(UpperCAmelCase ).parametrized
lowerCamelCase__ : Any = list(filter(lambda UpperCAmelCase : type(UpperCAmelCase ) not in self.src_skip , UpperCAmelCase ) )
lowerCamelCase__ : int = list(filter(lambda UpperCAmelCase : type(UpperCAmelCase ) not in self.dest_skip , UpperCAmelCase ) )
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
            raise Exception(
                F"""The number of operations differs: the source module has {len(UpperCAmelCase )} operations while"""
                F""" the destination module has {len(UpperCAmelCase )}.""" )
for dest_m, src_m in zip(UpperCAmelCase , UpperCAmelCase ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F"""Transfered from={src_m} to={dest_m}""" )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True ) -> Any:
print(F"""Converting {name}...""" )
with torch.no_grad():
lowerCamelCase__ : int = timm.create_model(_UpperCAmelCase , pretrained=_UpperCAmelCase ).eval()
lowerCamelCase__ : Union[str, Any] = ResNetForImageClassification(_UpperCAmelCase ).eval()
lowerCamelCase__ : str = ModuleTransfer(src=_UpperCAmelCase , dest=_UpperCAmelCase )
lowerCamelCase__ : Optional[int] = torch.randn((1, 3, 224, 224) )
module_transfer(_UpperCAmelCase )
assert torch.allclose(from_model(_UpperCAmelCase ) , our_model(_UpperCAmelCase ).logits ), "The model logits don't match the original one."
lowerCamelCase__ : Union[str, Any] = F"""resnet{"-".join(name.split("resnet" ) )}"""
print(_UpperCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=_UpperCAmelCase , )
# we can use the convnext one
lowerCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=_UpperCAmelCase , )
print(F"""Pushed {checkpoint_name}""" )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True ) -> List[str]:
lowerCamelCase__ : Dict = 'imagenet-1k-id2label.json'
lowerCamelCase__ : Optional[int] = 1000
lowerCamelCase__ : int = (1, num_labels)
lowerCamelCase__ : Any = 'huggingface/label-files'
lowerCamelCase__ : str = num_labels
lowerCamelCase__ : Any = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) , 'r' ) )
lowerCamelCase__ : Any = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : str = idalabel
lowerCamelCase__ : Any = {v: k for k, v in idalabel.items()}
lowerCamelCase__ : Tuple = partial(_UpperCAmelCase , num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
}
if model_name:
convert_weight_and_push(_UpperCAmelCase , names_to_config[model_name] , _UpperCAmelCase , _UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, expected_shape
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
_UpperCAmelCase : str = parser.parse_args()
_UpperCAmelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 45 | 1 |
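# Editor's note: a self-contained sketch of the Tracker idea from the conversion
# script above — use forward hooks to record every leaf module a forward pass
# touches. Pure PyTorch; no timm checkpoint required.
import torch
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
traced = []
handles = [
    m.register_forward_hook(lambda mod, inp, out: traced.append(type(mod).__name__))
    for m in model.modules()
    if len(list(m.children())) == 0  # leaf modules only, as in the Tracker above
]
model(torch.randn(1, 3, 32, 32))
for handle in handles:
    handle.remove()
print(traced)  # ['Conv2d', 'BatchNorm2d', 'ReLU']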
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__a = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : bool = field(default=_lowerCAmelCase , metadata={"""help""": """Whether to use SortishSampler or not."""} )
_A : bool = field(
default=_lowerCAmelCase , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
_A : Optional[int] = field(
default=_lowerCAmelCase , metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
} , )
_A : Optional[int] = field(
default=_lowerCAmelCase , metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
} , )
_A : Optional[Union[str, Path, GenerationConfig]] = field(
default=_lowerCAmelCase , metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
} , )
def lowerCAmelCase_ ( self: Dict ) -> str:
snake_case_ :List[str] = super().to_dict()
for k, v in d.items():
if isinstance(snake_case , snake_case ):
snake_case_ :Dict = v.to_dict()
return d
| 66 |
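# Editor's note: the `to_dict` override above recursively serializes a nested
# GenerationConfig before logging. A minimal sketch of that pattern with plain
# dataclasses — the class names here are illustrative, not the transformers API.
from dataclasses import dataclass, field


@dataclass
class GenCfg:
    num_beams: int = 4

    def to_dict(self):
        return {"num_beams": self.num_beams}


@dataclass
class Args:
    generation_config: GenCfg = field(default_factory=GenCfg)

    def to_dict(self):
        d = dict(self.__dict__)
        for k, v in d.items():
            if hasattr(v, "to_dict"):  # recurse into nested config objects
                d[k] = v.to_dict()
        return d


assert Args().to_dict() == {"generation_config": {"num_beams": 4}}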
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class lowerCamelCase :
'''simple docstring'''
def __init__( self: Tuple ) -> Optional[Any]:
snake_case_ :Optional[int] = {}
def lowerCAmelCase_ ( self: Dict , snake_case: str ) -> None:
snake_case_ :str = {}
def lowerCAmelCase_ ( self: Optional[int] , snake_case: str , snake_case: str , snake_case: float ) -> None:
if nodea not in self.connections:
self.add_node(snake_case )
if nodea not in self.connections:
self.add_node(snake_case )
snake_case_ :Dict = probability
def lowerCAmelCase_ ( self: List[Any] ) -> list[str]:
return list(self.connections )
def lowerCAmelCase_ ( self: Any , snake_case: str ) -> str:
snake_case_ :Optional[Any] = 0
snake_case_ :List[str] = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def A_ ( _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :List[str] = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(_lowercase, _lowercase, _lowercase )
snake_case_ :int = Counter(graph.get_nodes() )
snake_case_ :Optional[Any] = start
for _ in range(_lowercase ):
snake_case_ :Tuple = graph.transition(_lowercase )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66 | 1 |
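# Editor's note: a runnable re-implementation sketch of the weighted random walk
# above, with plain dicts instead of the graph class. Transition probabilities
# per node should sum to 1 so the cumulative sampling always returns a node.
import random
from collections import Counter

transitions = {"a": {"a": 0.9, "b": 0.1}, "b": {"a": 0.5, "b": 0.5}}


def step(node):
    threshold, cumulative = random.random(), 0.0
    for dest, prob in transitions[node].items():
        cumulative += prob
        if cumulative > threshold:
            return dest
    return node  # floating-point safety net


node, visits = "a", Counter()
for _ in range(10_000):
    node = step(node)
    visits[node] += 1
print(visits)  # "a" dominates thanks to its 0.9 self-loop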
def __lowerCamelCase ( ):
"""simple docstring"""
a :Tuple = []
a :Dict = 1
while len(A__ ) < 1E6:
constant.append(str(A__ ) )
i += 1
a :Tuple = """""".join(A__ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
| 351 |
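# Editor's note: the Project Euler solution above indexes into the Champernowne
# string 0.123456789101112..., so the 1-based digit position d_n maps to string
# index n - 1. A quick sanity check of that convention:
champernowne = "".join(str(i) for i in range(1, 200))
assert champernowne[0] == "1"   # d_1
assert champernowne[9] == "1"   # d_10, the leading "1" of 10
assert champernowne[11] == "1"  # d_12, the leading "1" of 11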
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class _snake_case ( _snake_case ):
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = None
def __lowerCamelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str]=0.999 , UpperCAmelCase_ : str="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(UpperCAmelCase_ : List[str] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(UpperCAmelCase_ : List[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
a :Union[str, Any] = []
for i in range(UpperCAmelCase_ ):
a :Optional[Any] = i / num_diffusion_timesteps
a :int = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(UpperCAmelCase_ ) / alpha_bar_fn(UpperCAmelCase_ ) , UpperCAmelCase_ ) )
return torch.tensor(UpperCAmelCase_ , dtype=torch.floataa )
class _snake_case ( _snake_case , _snake_case ):
@register_to_config
def __init__( self , _lowerCamelCase = 1000 , _lowerCamelCase = "fixed_small_log" , _lowerCamelCase = True , _lowerCamelCase = 1.0 , _lowerCamelCase = "epsilon" , _lowerCamelCase = "squaredcos_cap_v2" , ):
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' )
a :List[Any] = betas_for_alpha_bar(_lowerCamelCase )
a :Any = 1.0 - self.betas
a :int = torch.cumprod(self.alphas , dim=0 )
a :Union[str, Any] = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
a :List[Any] = 1.0
# setable values
a :Optional[Any] = None
a :List[str] = torch.from_numpy(np.arange(0 , _lowerCamelCase )[::-1].copy() )
a :List[str] = variance_type
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ):
return sample
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ):
a :Optional[int] = num_inference_steps
a :Union[str, Any] = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
a :int = (np.arange(0 , _lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
a :List[str] = torch.from_numpy(_lowerCamelCase ).to(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None ):
if prev_timestep is None:
a :Union[str, Any] = t - 1
a :Dict = self.alphas_cumprod[t]
a :str = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
a :Optional[Any] = 1 - alpha_prod_t
a :Tuple = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
a :int = self.betas[t]
else:
a :Tuple = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
a :str = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
a :Optional[int] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
a :Dict = torch.log(torch.clamp(_lowerCamelCase , min=1e-20 ) )
a :Optional[Any] = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
a :List[Any] = variance.log()
a :Any = beta.log()
a :List[Any] = (predicted_variance + 1) / 2
a :Dict = frac * max_log + (1 - frac) * min_log
return variance
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase=None , _lowerCamelCase = True , ):
a :Optional[int] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
a , a :Optional[int] = torch.split(_lowerCamelCase , sample.shape[1] , dim=1 )
else:
a :int = None
# 1. compute alphas, betas
if prev_timestep is None:
a :Any = t - 1
a :Tuple = self.alphas_cumprod[t]
a :Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
a :Tuple = 1 - alpha_prod_t
a :List[str] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
a :Union[str, Any] = self.betas[t]
a :Optional[Any] = self.alphas[t]
else:
a :Dict = 1 - alpha_prod_t / alpha_prod_t_prev
a :Optional[Any] = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
a :List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
a :List[str] = model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
''' for the UnCLIPScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
a :List[str] = torch.clamp(
_lowerCamelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
a :List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
a :Any = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
a :Optional[int] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
a :Dict = 0
if t > 0:
a :Optional[int] = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=_lowerCamelCase , device=model_output.device )
a :Dict = self._get_variance(
_lowerCamelCase , predicted_variance=_lowerCamelCase , prev_timestep=_lowerCamelCase , )
if self.variance_type == "fixed_small_log":
a :Optional[int] = variance
elif self.variance_type == "learned_range":
a :Union[str, Any] = (0.5 * variance).exp()
else:
raise ValueError(
F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
''' for the UnCLIPScheduler.''' )
a :Optional[int] = variance * variance_noise
a :List[str] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=_lowerCamelCase , pred_original_sample=_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
a :List[Any] = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
a :Optional[Any] = timesteps.to(original_samples.device )
a :Tuple = alphas_cumprod[timesteps] ** 0.5
a :List[Any] = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
a :Any = sqrt_alpha_prod.unsqueeze(-1 )
a :List[str] = (1 - alphas_cumprod[timesteps]) ** 0.5
a :Optional[int] = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
a :List[Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
a :str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 281 | 0 |
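# Editor's note: a standalone sketch of the "squaredcos_cap_v2" schedule built by
# `betas_for_alpha_bar` above — betas derived from a cosine alpha-bar curve and
# capped at max_beta.
import math

import torch


def cosine_betas(num_steps: int, max_beta: float = 0.999) -> torch.Tensor:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_steps):
        t1, t2 = i / num_steps, (i + 1) / num_steps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


betas = cosine_betas(1000)
print(betas[0].item(), betas[-1].item())  # tiny at t=0, near max_beta at the end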
import math
def lowerCamelCase__ ( _A ):
'''simple docstring'''
return math.sqrt(A__ ) * math.sqrt(A__ ) == num
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = 0
snake_case_ = n
while left <= right:
snake_case_ = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
snake_case_ = mid - 1
else:
snake_case_ = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 187 |
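# Editor's note: the float check above (sqrt(n) * sqrt(n) == n) can misfire for
# large n because of rounding; math.isqrt (Python >= 3.8) gives an exact integer
# variant, and the binary search above is the same idea done manually.
import math


def perfect_square_exact(num: int) -> bool:
    if num < 0:
        return False
    root = math.isqrt(num)
    return root * root == num


assert perfect_square_exact(16) and not perfect_square_exact(15)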
"""simple docstring"""
def snake_case ( A__ ,A__ ):
# "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + 2f(n-1) + fn)
UpperCAmelCase_ : Dict = (boundary[1] - boundary[0]) / steps
UpperCAmelCase_ : Optional[int] = boundary[0]
UpperCAmelCase_ : str = boundary[1]
UpperCAmelCase_ : Tuple = make_points(A__ ,A__ ,A__ )
UpperCAmelCase_ : List[str] = 0.0
y += (h / 2.0) * f(A__ )
for i in x_i:
# print(i)
y += h * f(A__ )
y += (h / 2.0) * f(A__ )
return y
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : Union[str, Any] = a + h
while x < (b - h):
yield x
UpperCAmelCase_ : Optional[Any] = x + h
def snake_case ( A__ ): # enter your function here
UpperCAmelCase_ : Dict = (x - 0) * (x - 0)
return y
def snake_case ( ):
UpperCAmelCase_ : Dict = 0.0 # Lower bound of integration
UpperCAmelCase_ : Optional[int] = 1.0 # Upper bound of integration
UpperCAmelCase_ : Dict = 10.0 # define number of steps or resolution
UpperCAmelCase_ : List[Any] = [a, b] # define boundary of integration
UpperCAmelCase_ : Union[str, Any] = method_a(A__ ,A__ )
print(F"""y = {y}""" )
if __name__ == "__main__":
main()
| 268 | 0 |
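# Editor's note: a compact cross-check of the extended trapezoidal rule above
# against the exact integral of f(x) = x**2 on [0, 1], which is 1/3; the error
# shrinks quadratically with the step count.
def trapezoid(f, a, b, steps):
    h = (b - a) / steps
    total = (f(a) + f(b)) / 2 + sum(f(a + i * h) for i in range(1, steps))
    return h * total


approx = trapezoid(lambda x: x * x, 0.0, 1.0, 1000)
assert abs(approx - 1 / 3) < 1e-6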
"""simple docstring"""
import copy
import re
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = "hp"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = None
@classmethod
def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = prefix
snake_case_ = defaults
cls.build_naming_info()
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]:
if len(lowerCAmelCase__) == 0:
return ""
snake_case_ = None
if any(char.isdigit() for char in word):
raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number')
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1, len(lowerCAmelCase__) + 1):
snake_case_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
snake_case_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCAmelCase__):
snake_case_ = ''
while integer != 0:
snake_case_ = chr(ord('A') + integer % 10) + s
integer //= 10
return s
snake_case_ = 0
while True:
snake_case_ = word + '#' + int_to_alphabetic(lowerCAmelCase__)
if sword in info["reverse_short_word"]:
continue
else:
snake_case_ = sword
break
snake_case_ = short_word
snake_case_ = word
return short_word
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Dict:
snake_case_ = param_name.split('_')
snake_case_ = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
snake_case_ = ['', '_']
for separator in separators:
snake_case_ = separator.join(lowerCAmelCase__)
if shortname not in info["reverse_short_param"]:
snake_case_ = shortname
snake_case_ = param_name
return shortname
return param_name
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> List[Any]:
snake_case_ = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = short_name
snake_case_ = param_name
@classmethod
def a_ ( cls) -> List[str]:
if cls.NAMING_INFO is not None:
return
snake_case_ = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
snake_case_ = list(cls.DEFAULTS.keys())
for k in field_keys:
cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = info
@classmethod
def a_ ( cls, lowerCAmelCase__) -> List[Any]:
cls.build_naming_info()
assert cls.PREFIX is not None
snake_case_ = [copy.copy(cls.PREFIX)]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'You should provide a default value for the param name {k} with value {v}')
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
snake_case_ = cls.NAMING_INFO['short_param'][k]
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = 1 if v else 0
snake_case_ = '' if isinstance(lowerCAmelCase__, (int, float)) else '-'
snake_case_ = f'{key}{sep}{v}'
name.append(lowerCAmelCase__)
return "_".join(lowerCAmelCase__)
@classmethod
def a_ ( cls, lowerCAmelCase__) -> Optional[Any]:
snake_case_ = repr[len(cls.PREFIX) + 1 :]
if repr == "":
snake_case_ = []
else:
snake_case_ = repr.split('_')
snake_case_ = {}
for value in values:
if "-" in value:
snake_case_ , snake_case_ = value.split('-')
else:
snake_case_ = re.sub('[0-9.]', '', lowerCAmelCase__)
snake_case_ = float(re.sub('[^0-9.]', '', lowerCAmelCase__))
snake_case_ = cls.NAMING_INFO['reverse_short_param'][p_k]
snake_case_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
snake_case_ = cls.DEFAULTS[k]
return parameters
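# Editor's note: the class above mirrors transformers' TrialShortNamer, which
# compresses hyperparameter dicts into short run names (dropping values equal to
# the defaults) and parses them back. A hedged usage sketch — the import path,
# subclass name, and defaults are assumptions:
from transformers.utils.hp_naming import TrialShortNamer


class RunNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}


name = RunNamer.shortname({"learning_rate": 3e-4, "batch_size": 32})
print(name)  # batch_size equals its default, so only the learning rate survives
assert RunNamer.parse_repr(name)["learning_rate"] == 3e-4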
| 312 | """simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def a_ ( self, lowerCAmelCase__=0) -> List[Any]:
snake_case_ = floats_tensor((1, 3, 128, 128), rng=random.Random(lowerCAmelCase__))
snake_case_ = np.random.RandomState(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Optional[Any]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def a_ ( self) -> List[str]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> str:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
# warmup pass to apply optimizations
snake_case_ = pipe(**self.get_dummy_inputs())
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> int:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
@property
def a_ ( self) -> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a_ ( self) -> str:
snake_case_ = ort.SessionOptions()
snake_case_ = False
return options
def a_ ( self) -> Any:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
# using the PNDM scheduler by default
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def a_ ( self) -> List[Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
snake_case_ = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 312 | 1 |
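# Editor's note: the tests above pin a 3x3 corner slice of the generated image
# and compare it elementwise to stored reference values. A minimal sketch of
# that regression pattern with numpy stand-ins:
import numpy as np

image = np.zeros((1, 128, 128, 3), dtype=np.float32)  # stand-in for pipe(...).images
image_slice = image[0, -3:, -3:, -1].flatten()        # 9 values from one corner
expected_slice = np.zeros(9, dtype=np.float32)        # pinned values in a real test
assert image.shape == (1, 128, 128, 3)
assert np.abs(image_slice - expected_slice).max() < 1e-1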
'''simple docstring'''
import os
import sys
import unittest
snake_case_ : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
snake_case_ : Tuple = os.path.join(git_repo_path, 'src', 'diffusers')
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : List[str] = find_backend(' if not is_torch_available():' )
self.assertEqual(lowerCamelCase__ ,'torch' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_UpperCamelCase : Dict = find_backend(' if not (is_torch_available() and is_transformers_available()):' )
self.assertEqual(lowerCamelCase__ ,'torch_and_transformers' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_UpperCamelCase : Optional[int] = find_backend(
' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' )
self.assertEqual(lowerCamelCase__ ,'torch_and_transformers_and_onnx' )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : str = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' ,lowerCamelCase__ )
self.assertIn('torch_and_transformers' ,lowerCamelCase__ )
self.assertIn('flax_and_transformers' ,lowerCamelCase__ )
self.assertIn('torch_and_transformers_and_onnx' ,lowerCamelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('UNet2DModel' ,objects['torch'] )
self.assertIn('FlaxUNet2DConditionModel' ,objects['flax'] )
self.assertIn('StableDiffusionPipeline' ,objects['torch_and_transformers'] )
self.assertIn('FlaxStableDiffusionPipeline' ,objects['flax_and_transformers'] )
self.assertIn('LMSDiscreteScheduler' ,objects['torch_and_scipy'] )
self.assertIn('OnnxStableDiffusionPipeline' ,objects['torch_and_transformers_and_onnx'] )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : List[str] = create_dummy_object('CONSTANT' ,'\'torch\'' )
self.assertEqual(lowerCamelCase__ ,'\nCONSTANT = None\n' )
_UpperCamelCase : List[Any] = create_dummy_object('function' ,'\'torch\'' )
self.assertEqual(
lowerCamelCase__ ,'\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
_UpperCamelCase : str = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n'
_UpperCamelCase : Any = create_dummy_object('FakeClass' ,'\'torch\'' )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
_UpperCamelCase : int = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] ,lowerCamelCase__ )
| 83 |
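# Editor's note: a tiny sketch of the backend detection the test above exercises —
# pull "torch" or "torch_and_transformers" out of dependency-guard lines with a
# regex, the same behavior `find_backend` is asserted to have:
import re

_GUARD = re.compile(r"is_(\w+)_available\(\)")


def find_backend_sketch(line):
    names = _GUARD.findall(line)
    return "_and_".join(names) if names else None


assert find_backend_sketch("    if not is_torch_available():") == "torch"
assert (
    find_backend_sketch("    if not (is_torch_available() and is_transformers_available()):")
    == "torch_and_transformers"
)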
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class lowercase__ :
def __init__( self : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : MutableSequence[float] ):
'''simple docstring'''
if len(lowerCamelCase__ ) != degree + 1:
raise ValueError(
'The number of coefficients should be equal to the degree + 1.' )
_UpperCamelCase : list[float] = list(lowerCamelCase__ )
_UpperCamelCase : Tuple = degree
def __add__( self : Optional[int] ,lowerCamelCase__ : Polynomial ):
'''simple docstring'''
if self.degree > polynomial_a.degree:
_UpperCamelCase : str = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree ,lowerCamelCase__ )
else:
_UpperCamelCase : str = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree ,lowerCamelCase__ )
def __sub__( self : Dict ,lowerCamelCase__ : Polynomial ):
'''simple docstring'''
return self + polynomial_a * Polynomial(0 ,[-1] )
def __neg__( self : Dict ):
'''simple docstring'''
return Polynomial(self.degree ,[-c for c in self.coefficients] )
def __mul__( self : Union[str, Any] ,lowerCamelCase__ : Polynomial ):
'''simple docstring'''
_UpperCamelCase : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree ,lowerCamelCase__ )
def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : int | float ):
'''simple docstring'''
_UpperCamelCase : int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : Union[str, Any] ):
'''simple docstring'''
_UpperCamelCase : Dict = ''
for i in range(self.degree ,-1 ,-1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(lowerCamelCase__ )
return polynomial
def __repr__( self : List[str] ):
'''simple docstring'''
return self.__str__()
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : list[float] = [0] * self.degree
for i in range(self.degree ):
_UpperCamelCase : Optional[int] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 ,lowerCamelCase__ )
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : int | float = 0 ):
'''simple docstring'''
_UpperCamelCase : list[float] = [0] * (self.degree + 2)
_UpperCamelCase : Any = constant
for i in range(self.degree + 1 ):
_UpperCamelCase : Optional[Any] = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 ,lowerCamelCase__ )
def __eq__( self : str ,lowerCamelCase__ : object ):
'''simple docstring'''
if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : List[str] ,lowerCamelCase__ : object ):
'''simple docstring'''
return not self.__eq__(lowerCamelCase__ )
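# Hedged usage sketch (added; assumes the intended, de-obfuscated API of the class
# above, i.e. Polynomial(degree, coefficients) with coefficients[i] the x^i term):
#   p = Polynomial(2, [5, 0, 3])   # 3x^2 + 5
#   q = Polynomial(1, [1, 2])      # 2x + 1
#   str(p + q)  ->  "3x^2 + 2x + 6"
#   str(p * q)  ->  "6x^3 + 3x^2 + 10x + 5"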
| 83 | 1 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class lowerCAmelCase__ :
@staticmethod
def _snake_case ( *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
pass
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
_lowercase : Optional[int] = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[str] = pipeline(
'''document-question-answering''' , model=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = INVOICE_URL
lowercase_ : Optional[int] = list(zip(*apply_tesseract(load_image(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , '''''' ) ) )
lowercase_ : Tuple = '''What is the placebo?'''
lowercase_ : Dict = [
{
'''image''': load_image(__SCREAMING_SNAKE_CASE ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Any = dqa_pipeline(__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(
__SCREAMING_SNAKE_CASE , [
[
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''answer''': ANY(__SCREAMING_SNAKE_CASE ), '''start''': ANY(__SCREAMING_SNAKE_CASE ), '''end''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''answer''': ANY(__SCREAMING_SNAKE_CASE ), '''start''': ANY(__SCREAMING_SNAKE_CASE ), '''end''': ANY(__SCREAMING_SNAKE_CASE )},
]
]
* 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
lowercase_ : Dict = INVOICE_URL
lowercase_ : List[str] = '''How many cats are there?'''
lowercase_ : Dict = [
{'''score''': 0.0_001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
lowercase_ : Dict = dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , __SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , __SCREAMING_SNAKE_CASE )
        # No text is detected in this image, so layoutlmv2 should fail,
        # most likely returning an empty answer.
lowercase_ : List[str] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowercase_ : Dict = dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(__SCREAMING_SNAKE_CASE , [] )
        # We can optionally pass the words and bounding boxes directly
lowercase_ : Optional[int] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowercase_ : Union[str, Any] = []
lowercase_ : List[Any] = []
lowercase_ : Optional[int] = dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , words=__SCREAMING_SNAKE_CASE , boxes=__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(__SCREAMING_SNAKE_CASE , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
lowercase_ : List[Any] = INVOICE_URL
lowercase_ : Any = '''What is the invoice number?'''
lowercase_ : Optional[Any] = dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'''score''': 0.9_944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase_ : str = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'''score''': 0.9_944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase_ : Union[str, Any] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'''score''': 0.9_944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
lowercase_ : Tuple = INVOICE_URL
lowercase_ : str = '''What is the invoice number?'''
lowercase_ : int = dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'''score''': 0.9_974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase_ : List[Any] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'''score''': 0.9_974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase_ : Union[str, Any] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'''score''': 0.9_974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__SCREAMING_SNAKE_CASE , revision='''3dc6de3''' , )
lowercase_ : Optional[Any] = INVOICE_URL
lowercase_ : Dict = '''What is the invoice number?'''
lowercase_ : int = dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
lowercase_ : str = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
lowercase_ : str = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
lowercase_ : Optional[Any] = list(zip(*apply_tesseract(load_image(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , '''''' ) ) )
# This model should also work if `image` is set to None
lowercase_ : Union[str, Any] = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__SCREAMING_SNAKE_CASE , revision='''3dc6de3''' , max_seq_len=50 , )
lowercase_ : Tuple = INVOICE_URL
lowercase_ : str = '''What is the invoice number?'''
lowercase_ : Tuple = dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase_ : Union[str, Any] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
lowercase_ : Tuple = list(zip(*apply_tesseract(load_image(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , '''''' ) ) )
# This model should also work if `image` is set to None
lowercase_ : List[str] = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Any = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
lowercase_ : Tuple = INVOICE_URL
lowercase_ : Any = '''What is the invoice number?'''
lowercase_ : List[str] = dqa_pipeline(image=__SCREAMING_SNAKE_CASE , question=__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def _snake_case ( self ):
"""simple docstring"""
pass
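# Hedged usage sketch (added; model id and expected output mirror the slow tests
# above, score elided):
#   dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#   dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1)
#   -> [{"score": ..., "answer": "us-001", "start": 16, "end": 16}]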
| 356 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : Optional[Any] = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''vit'''
def __init__( self , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=30_72 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-1_2 , __SCREAMING_SNAKE_CASE=2_24 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=16 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = hidden_size
lowercase_ : Dict = num_hidden_layers
lowercase_ : List[Any] = num_attention_heads
lowercase_ : Any = intermediate_size
lowercase_ : Union[str, Any] = hidden_act
lowercase_ : Dict = hidden_dropout_prob
lowercase_ : List[str] = attention_probs_dropout_prob
lowercase_ : Any = initializer_range
lowercase_ : Tuple = layer_norm_eps
lowercase_ : Union[str, Any] = image_size
lowercase_ : Tuple = patch_size
lowercase_ : Tuple = num_channels
lowercase_ : Union[str, Any] = qkv_bias
lowercase_ : List[Any] = encoder_stride
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = version.parse('''1.11''' )
@property
def _snake_case ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _snake_case ( self ):
"""simple docstring"""
return 1E-4
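# Hedged note (added): with the defaults above (image_size=224, patch_size=16),
# a ViT encoder sees (224 // 16) ** 2 = 196 patch tokens plus one [CLS] token,
# and the ONNX config marks every pixel_values axis as dynamic.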
| 264 | 0 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a ):
super().__init__()
__a = nn.ModuleList(_a )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a = None , _a = None , _a = None , _a = None , _a = False , _a = True , ):
for i, (image, scale, controlnet) in enumerate(zip(_a , _a , self.nets ) ):
__a , __a = controlnet(
_a , _a , _a , _a , _a , _a , _a , _a , _a , _a , _a , )
# merge samples
if i == 0:
__a , __a = down_samples, mid_sample
else:
__a = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(_a , _a )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __UpperCAmelCase ( self , _a , _a = True , _a = None , _a = False , _a = None , ):
__a = 0
__a = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
_a , is_main_process=_a , save_function=_a , safe_serialization=_a , variant=_a , )
idx += 1
__a = model_path_to_save + f'''_{idx}'''
@classmethod
def __UpperCAmelCase ( cls , _a , **_a ):
__a = 0
__a = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
__a = pretrained_model_path
while os.path.isdir(_a ):
__a = ControlNetModel.from_pretrained(_a , **_a )
controlnets.append(_a )
idx += 1
__a = pretrained_model_path + f'''_{idx}'''
logger.info(f'''{len(_a )} controlnets loaded from {pretrained_model_path}.''' )
if len(_a ) == 0:
raise ValueError(
f'''No ControlNets found under {os.path.dirname(_a )}. Expected at least {pretrained_model_path + '_0'}.''' )
return cls(_a )
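# Hedged usage sketch (added; repo ids are placeholders and the class name is
# obfuscated above): for two controlnets, save_pretrained writes side-by-side
# directories and from_pretrained probes them back in order until one is missing:
#   multi = MultiControlNet([net_a, net_b])
#   multi.save_pretrained("./multi")             # writes ./multi and ./multi_1
#   restored = MultiControlNet.from_pretrained("./multi")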
| 45 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase : List[str] = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase : Tuple = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
UpperCAmelCase : str = '▁'
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = VOCAB_FILES_NAMES
__a = PRETRAINED_VOCAB_FILES_MAP
__a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a = ["""input_ids""", """attention_mask"""]
__a = BarthezTokenizer
def __init__( self : Optional[int] , UpperCamelCase : List[str]=None , UpperCamelCase : List[str]=None , UpperCamelCase : Union[str, Any]="<s>" , UpperCamelCase : Any="</s>" , UpperCamelCase : Tuple="</s>" , UpperCamelCase : Tuple="<s>" , UpperCamelCase : int="<unk>" , UpperCamelCase : List[str]="<pad>" , UpperCamelCase : int="<mask>" , **UpperCamelCase : Any , ):
'''simple docstring'''
__UpperCAmelCase : Tuple = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , **UpperCamelCase , )
__UpperCAmelCase : List[Any] = vocab_file
__UpperCAmelCase : Tuple = False if not self.vocab_file else True
def lowerCamelCase__ ( self : Any , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
__UpperCAmelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = [self.sep_token_id]
__UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase : List[Any] = os.path.join(
UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ):
copyfile(self.vocab_file , UpperCamelCase )
return (out_vocab_file,)
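# Hedged illustration (added; token ids below are hypothetical): the pair layout
# built above is <s> A </s></s> B </s> with all-zero token_type_ids, as in
# CamemBERT/RoBERTa:
cls_id, sep_id = [0], [2]
ids_a, ids_b = [10, 11], [20]
assert cls_id + ids_a + sep_id + sep_id + ids_b + sep_id == [0, 10, 11, 2, 2, 20, 2]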
| 115 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowercase__ = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ) -> List[Any]:
'''simple docstring'''
snake_case : List[Any] = XLNetConfig.from_json_file(__lowerCAmelCase )
snake_case : str = finetuning_task.lower() if finetuning_task is not None else ''''''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F'Building PyTorch XLNetForSequenceClassification model from configuration: {config}' )
snake_case : List[str] = finetuning_task
snake_case : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
snake_case : Tuple = XLNetForSequenceClassification(__lowerCAmelCase )
elif "squad" in finetuning_task:
snake_case : List[str] = finetuning_task
snake_case : Optional[Any] = XLNetForQuestionAnswering(__lowerCAmelCase )
else:
snake_case : str = XLNetLMHeadModel(__lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Save pytorch-model
snake_case : Union[str, Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
snake_case : int = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
print(F'Save PyTorch model to {os.path.abspath(__lowerCAmelCase )}' )
torch.save(model.state_dict() , __lowerCAmelCase )
print(F'Save configuration file to {os.path.abspath(__lowerCAmelCase )}' )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
lowercase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
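# Hedged CLI sketch (added; the script filename and all paths are placeholders):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./xlnet_model/model.ckpt \
#     --xlnet_config_file ./xlnet_model/xlnet_config.json \
#     --pytorch_dump_folder_path ./xlnet-pytorch \
#     --finetuning_task sst-2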
| 350 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
lowercase__ = parser.parse_args()
lowercase__ = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
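# Hedged CLI sketch (added; the script filename and paths are placeholders):
#   python convert_original_stable_diffusion_to_diffusers.py \
#     --checkpoint_path ./v1-5-pruned-emaonly.ckpt --dump_path ./sd15 \
#     --extract_ema --to_safetensors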
| 83 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_lowerCAmelCase = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
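# Hedged note (added): with the _LazyModule pattern above, the heavy submodules
# are only imported on first attribute access, e.g.:
#   import transformers.models.ernie as ernie
#   ernie.ErnieModel  # this access triggers the actual import of modeling_ernie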
| 37 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ = logging.get_logger(__name__)
a__ = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class UpperCAmelCase_ ( __lowercase , __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Any = "bit"
UpperCAmelCase__ : Optional[int] = ["preactivation", "bottleneck"]
UpperCAmelCase__ : Optional[Any] = ["SAME", "VALID"]
def __init__( self , _a=3 , _a=6_4 , _a=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , _a=[3, 4, 6, 3] , _a="preactivation" , _a="relu" , _a=None , _a=3_2 , _a=0.0 , _a=False , _a=3_2 , _a=1 , _a=None , _a=None , **_a , ) -> Union[str, Any]:
super().__init__(**_a )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
_a : Any = global_padding.upper()
else:
raise ValueError(F"""Padding strategy {global_padding} not supported""" )
_a : Optional[int] = num_channels
_a : List[Any] = embedding_size
_a : Any = hidden_sizes
_a : int = depths
_a : Dict = layer_type
_a : int = hidden_act
_a : Optional[Any] = global_padding
_a : Optional[Any] = num_groups
_a : Union[str, Any] = drop_path_rate
_a : Tuple = embedding_dynamic_padding
_a : Union[str, Any] = output_stride
_a : Any = width_factor
_a : Any = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(_a ) + 1 )]
_a , _a : List[str] = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
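# Hedged usage sketch (added; the class above is BitConfig under an obfuscated
# name):
#   config = BitConfig(layer_type="bottleneck", global_padding="same")
#   config.global_padding  ->  "SAME"   # uppercased by the validation above
#   # with out_features/out_indices left as None, only the deepest stage is kept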
| 235 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase = logging.get_logger(__name__)
lowercase = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __lowercase ( __snake_case, __snake_case ):
'''simple docstring'''
_A : Optional[Any] = """swin"""
_A : int = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : int , _a : str=224 , _a : Dict=4 , _a : Union[str, Any]=3 , _a : List[str]=96 , _a : int=[2, 2, 6, 2] , _a : Union[str, Any]=[3, 6, 12, 24] , _a : List[str]=7 , _a : Optional[Any]=4.0 , _a : Optional[Any]=True , _a : Union[str, Any]=0.0 , _a : str=0.0 , _a : Any=0.1 , _a : Dict="gelu" , _a : Any=False , _a : Any=0.02 , _a : Tuple=1E-5 , _a : Tuple=32 , _a : Tuple=None , _a : str=None , **_a : List[str] , ):
super().__init__(**UpperCamelCase__ )
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = embed_dim
UpperCamelCase__ = depths
UpperCamelCase__ = len(UpperCamelCase__ )
UpperCamelCase__ = num_heads
UpperCamelCase__ = window_size
UpperCamelCase__ = mlp_ratio
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = drop_path_rate
UpperCamelCase__ = hidden_act
UpperCamelCase__ = use_absolute_embeddings
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = initializer_range
UpperCamelCase__ = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase__ = int(embed_dim * 2 ** (len(UpperCamelCase__ ) - 1) )
UpperCamelCase__ = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(UpperCamelCase__ ) + 1 )]
UpperCamelCase__ = get_aligned_output_features_output_indices(
out_features=UpperCamelCase__ , out_indices=UpperCamelCase__ , stage_names=self.stage_names )
class __lowercase ( __snake_case ):
'''simple docstring'''
_A : Optional[Any] = version.parse('''1.11''' )
@property
def A_ ( self : str ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def A_ ( self : List[str] ):
return 1E-4
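# Hedged note (added): with the defaults above (embed_dim=96 and 4 stages),
# hidden_size = int(96 * 2 ** 3) = 768, the channel width after the last stage.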
| 369 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowercase = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
        # Maybe switch to is_torch_available in the future here so that Accelerate
        # becomes a hard dependency of Transformers when PyTorch is installed
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Dict=None ):
'''simple docstring'''
require_version(deps[pkg], UpperCamelCase__ )
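# Hedged usage sketch (added; the helper above is dep_version_check under an
# obfuscated name): it checks one optional dependency on demand, e.g.
#   dep_version_check("tokenizers")  # raises if the installed version breaks the pin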
| 35 | 0 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_UpperCamelCase : Optional[int] = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_UpperCamelCase : Any = concatenate_datasets
_UpperCamelCase : Dict = DownloadConfig
_UpperCamelCase : Dict = DownloadManager
_UpperCamelCase : Dict = DownloadMode
_UpperCamelCase : int = DownloadConfig
_UpperCamelCase : Union[str, Any] = DownloadMode
_UpperCamelCase : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 77 |
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCamelCase_ = logging.get_logger(__name__)
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = question_encoder
_lowerCAmelCase : Optional[Any] = generator
_lowerCAmelCase : Optional[Any] = self.question_encoder
def __UpperCamelCase ( self , snake_case_ ):
if os.path.isfile(snake_case_ ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
_lowerCAmelCase : Any = os.path.join(snake_case_ , """question_encoder_tokenizer""" )
_lowerCAmelCase : Tuple = os.path.join(snake_case_ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(snake_case_ )
self.generator.save_pretrained(snake_case_ )
@classmethod
def __UpperCamelCase ( cls , snake_case_ , **snake_case_ ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
_lowerCAmelCase : Dict = kwargs.pop("""config""" , snake_case_ )
if config is None:
_lowerCAmelCase : List[Any] = RagConfig.from_pretrained(snake_case_ )
_lowerCAmelCase : int = AutoTokenizer.from_pretrained(
snake_case_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
snake_case_ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=snake_case_ , generator=snake_case_ )
def __call__( self , *snake_case_ , **snake_case_ ):
return self.current_tokenizer(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.generator.batch_decode(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.generator.decode(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.question_encoder
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = self.generator
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = "longest" , snake_case_ = None , snake_case_ = True , **snake_case_ , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , snake_case_ , )
if max_length is None:
_lowerCAmelCase : Any = self.current_tokenizer.model_max_length
_lowerCAmelCase : List[Any] = self(
snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , max_length=snake_case_ , padding=snake_case_ , truncation=snake_case_ , **snake_case_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_lowerCAmelCase : List[str] = self.current_tokenizer.model_max_length
_lowerCAmelCase : List[str] = self(
text_target=snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , padding=snake_case_ , max_length=snake_case_ , truncation=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Dict = labels["""input_ids"""]
return model_inputs
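# Hedged usage sketch (added; the class above is RagTokenizer under obfuscated
# names, repo id from the public RAG checkpoints):
#   tok = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   inputs = tok("who wrote hamlet?", return_tensors="pt")   # question-encoder side
#   texts = tok.batch_decode(generated_ids, skip_special_tokens=True)  # generator side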
| 309 | 0 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
A__ = get_logger(__name__)
A__ = Path(__file__).parent / '''model_card_template.md'''
A__ = uuid4().hex
A__ = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
A__ = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
A__ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def _lowerCAmelCase ( __lowerCAmelCase = None ) -> str:
"""simple docstring"""
snake_case__ : List[Any] = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f"""; torch/{_torch_version}"""
if is_flax_available():
ua += f"""; jax/{_jax_version}"""
ua += f"""; flax/{_flax_version}"""
if is_onnx_available():
ua += f"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
ua += "; " + user_agent
return ua
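# Hedged example (added; versions illustrative) of the UA string built above:
#   "diffusers/0.16.1; python/3.10.6; session_id/0f3b...; torch/2.0.1"
# or, when telemetry is disabled, just the base string plus "; telemetry/off".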
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None ) -> Tuple:
"""simple docstring"""
if token is None:
snake_case__ : Optional[Any] = HfFolder.get_token()
if organization is None:
snake_case__ : Tuple = whoami(__lowerCAmelCase )['''name''']
return f"""{username}/{model_id}"""
else:
return f"""{organization}/{model_id}"""
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
"""simple docstring"""
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''' )
if hasattr(__lowerCAmelCase , '''local_rank''' ) and args.local_rank not in [-1, 0]:
return
snake_case__ : str = args.hub_token if hasattr(__lowerCAmelCase , '''hub_token''' ) else None
snake_case__ : List[str] = get_full_repo_name(__lowerCAmelCase , token=__lowerCAmelCase )
snake_case__ : str = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=__lowerCAmelCase , model_name=__lowerCAmelCase , repo_name=__lowerCAmelCase , dataset_name=args.dataset_name if hasattr(__lowerCAmelCase , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(__lowerCAmelCase , '''gradient_accumulation_steps''' ) else None
        ) , adam_beta1=args.adam_beta1 if hasattr(__lowerCAmelCase , '''adam_beta1''' ) else None , adam_beta2=args.adam_beta2 if hasattr(__lowerCAmelCase , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(__lowerCAmelCase , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(__lowerCAmelCase , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(__lowerCAmelCase , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(__lowerCAmelCase , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(__lowerCAmelCase , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(__lowerCAmelCase , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(__lowerCAmelCase , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , )
snake_case__ : str = os.path.join(args.output_dir , '''README.md''' )
model_card.save(__lowerCAmelCase )
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase = None ) -> str:
"""simple docstring"""
if resolved_file is None or commit_hash is not None:
return commit_hash
snake_case__ : List[str] = str(Path(__lowerCAmelCase ).as_posix() )
snake_case__ : Dict = re.search(r'''snapshots/([^/]+)/''' , __lowerCAmelCase )
if search is None:
return None
snake_case__ : str = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(__lowerCAmelCase ) else None
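# Hedged example (added): for a cached file such as
#   .../models--org--name/snapshots/0123abcd0123abcd0123abcd0123abcd01234567/config.json
# the regex above pulls out the snapshot folder name and returns it only when it
# matches REGEX_COMMIT_HASH (a full 40-hex commit id).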
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
A__ = os.path.expanduser(
os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
A__ = os.path.join(hf_cache_home, '''diffusers''')
def _lowerCAmelCase ( __lowerCAmelCase = None , __lowerCAmelCase = None ) -> None:
"""simple docstring"""
if new_cache_dir is None:
snake_case__ : Union[str, Any] = DIFFUSERS_CACHE
if old_cache_dir is None:
snake_case__ : int = old_diffusers_cache
snake_case__ : int = Path(__lowerCAmelCase ).expanduser()
snake_case__ : List[Any] = Path(__lowerCAmelCase ).expanduser()
for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
snake_case__ : Dict = new_cache_dir / old_blob_path.relative_to(__lowerCAmelCase )
new_blob_path.parent.mkdir(parents=__lowerCAmelCase , exist_ok=__lowerCAmelCase )
os.replace(__lowerCAmelCase , __lowerCAmelCase )
try:
os.symlink(__lowerCAmelCase , __lowerCAmelCase )
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
A__ = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
A__ = 0
else:
with open(cache_version_file) as f:
try:
A__ = int(f.read())
except ValueError:
A__ = 0
if cache_version < 1:
A__ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
A__ = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'''the directory exists and can be written to.'''
)
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase = None ) -> str:
"""simple docstring"""
if variant is not None:
snake_case__ : Any = weights_name.split('''.''' )
snake_case__ : int = splits[:-1] + [variant] + splits[-1:]
snake_case__ : List[str] = '''.'''.join(__lowerCAmelCase )
return weights_name
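# Hedged example (added), derived directly from the splitting logic above:
#   _add_variant("diffusion_pytorch_model.bin", "fp16")
#   ->  "diffusion_pytorch_model.fp16.bin"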
def _lowerCAmelCase ( __lowerCAmelCase , *,
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , ) -> List[Any]:
"""simple docstring"""
snake_case__ : int = str(__lowerCAmelCase )
if os.path.isfile(__lowerCAmelCase ):
return pretrained_model_name_or_path
elif os.path.isdir(__lowerCAmelCase ):
if os.path.isfile(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ):
# Load from a PyTorch checkpoint
snake_case__ : List[Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) ):
snake_case__ : str = os.path.join(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return model_file
else:
raise EnvironmentError(
f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__lowerCAmelCase ).base_version ) >= version.parse('''0.20.0''' )
):
try:
snake_case__ : str = hf_hub_download(
__lowerCAmelCase , filename=_add_variant(__lowerCAmelCase , __lowerCAmelCase ) , cache_dir=__lowerCAmelCase , force_download=__lowerCAmelCase , proxies=__lowerCAmelCase , resume_download=__lowerCAmelCase , local_files_only=__lowerCAmelCase , use_auth_token=__lowerCAmelCase , user_agent=__lowerCAmelCase , subfolder=__lowerCAmelCase , revision=revision or commit_hash , )
warnings.warn(
f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , __lowerCAmelCase , )
return model_file
except: # noqa: E722
warnings.warn(
f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__lowerCAmelCase , __lowerCAmelCase )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__lowerCAmelCase , __lowerCAmelCase )}' so that the correct variant file can be added.""" , __lowerCAmelCase , )
try:
# 2. Load model file as usual
snake_case__ : Dict = hf_hub_download(
__lowerCAmelCase , filename=__lowerCAmelCase , cache_dir=__lowerCAmelCase , force_download=__lowerCAmelCase , proxies=__lowerCAmelCase , resume_download=__lowerCAmelCase , local_files_only=__lowerCAmelCase , use_auth_token=__lowerCAmelCase , user_agent=__lowerCAmelCase , subfolder=__lowerCAmelCase , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'''this model name. Check the model page at '''
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
| 355 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6]
        )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
@slow
    def test_tokenizer_integration(self):
# fmt: off
snake_case__ : Tuple = {'''input_ids''': [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case__, model_name='''facebook/m2m100_418M''', revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''')
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )
        for k in batch:
            batch[k] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
@require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
        self.tokenizer.tgt_lang = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
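
# Usage sketch for the tokenizer exercised above (downloads the full checkpoint,
# so it is illustrative rather than part of the test suite):
#
#     tok = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     batch = tok("In my opinion, there are two levels of response.", return_tensors="pt")
#     # per the assertions above, batch.input_ids starts with the "en" language
#     # code (128022) and ends with EOS (2)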
| 44 | 0 |
def prime_sieve_eratosthenes(num: int) -> list:
    """
    Sieve of Eratosthenes: return every prime up to and including num.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    >>> prime_sieve_eratosthenes(2)
    [2]
    """
    if num <= 0:
        raise ValueError("""Input must be a positive integer""")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            # every smaller multiple of p already has a prime factor < p,
            # so marking can start at p * p
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
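
# Worked example: prime_sieve_eratosthenes(30)
# p=2 marks 4, 6, ..., 30; p=3 marks 9, 15, 21, 27 (even multiples are already
# marked); p=5 marks 25 (30 is already marked); the loop stops at p=7 since
# 7 * 7 = 49 > 30, leaving [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].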
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
| 244 | '''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D array into a column vector."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Pooled covariance of the samples within each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)

    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Covariance of the class means around the overall mean, weighted by class size."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )

    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info('''Principal Component Analysis computed''')

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='''%(message)s''', force=True)
        logging.error('''Dataset empty''')
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info('''Linear Discriminant Analysis computed''')

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='''%(message)s''', force=True)
        logging.error('''Dataset empty''')
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(
            features, labels, classes, dimensions
        )
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                '''Did not raise AssertionError for dimensions > classes'''
            )
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
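
# Minimal usage sketch (assumption: features are laid out with one row per
# feature and one column per sample, matching the `features.mean(1)` calls above):
def _pca_demo() -> None:
    demo_features = np.array(
        [[1.0, 2.0, 3.0, 4.0, 5.0], [2.0, 4.0, 6.0, 8.0, 10.0], [5.0, 5.0, 5.0, 5.0, 5.0]]
    )
    projected = principal_component_analysis(demo_features, 2)
    print(projected.shape)  # (2, 5): two components for each of the five samples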
if __name__ == "__main__":
import doctest
doctest.testmod()
| 198 | 0 |
'''simple docstring'''
from collections import deque
def tarjan(g: list) -> list:
    """
    Tarjan's algorithm: return the strongly connected components of the
    directed graph `g` (adjacency lists), in reverse topological order.
    """
    n = len(g)

    stack: deque = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n: int, edges: list) -> list:
    """Build adjacency lists for n vertices from a list of (u, v) edges."""
    g: list = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
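
    # Reading the result: the cycle 0 -> 1 -> 2 -> 0 together with 0 -> 3 -> 1
    # ties {0, 1, 2, 3} into one strongly connected component, while 4, 5 and 6
    # have no path back to their predecessors, so each is its own component.
    # Tarjan emits components in reverse topological order of the condensation
    # graph, hence [[5], [6], [4], [3, 2, 1, 0]].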
| 364 |
'''simple docstring'''
def twos_complement(number: int) -> str:
    """
    Minimal two's-complement representation of a non-positive integer.

    >>> twos_complement(0)
    '0b0'
    >>> twos_complement(-1)
    '0b11'
    >>> twos_complement(-5)
    '0b1011'
    >>> twos_complement(-17)
    '0b101111'
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 142 | 0 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")
        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")
        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")
        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")
        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
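
    # Note on the boundary values asserted above: each activation behaves like
    # x * (sigmoid-like gate): the gate saturates to 0 for large negative x
    # (so act(-100) is exactly 0 in float32), act(0) is exactly 0, and the gate
    # is so close to 1 at x = 20 that act(20) rounds to 20 in float32.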
| 45 |
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id)

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small(self):
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained('''ctrl''')
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11_859, 0, 1_611, 8]], dtype=torch.long, device=torch_device)  # Legal the president is
        expected_output_ids = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
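
    # The integration test above relies on greedy decoding: with do_sample=False,
    # generate() is deterministic, so the continuation can be compared verbatim
    # against the token ids recorded from a reference run.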
| 45 | 1 |
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """
    Classic fourth-order Runge-Kutta integration of y' = f(x, y)
    from x0 to x_end with step h, starting at y(x0) = y0.

    >>> # y' = y, y(0) = 1  ->  y(x) = exp(x)
    >>> y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 5)
    >>> float(round(y[-1], 2))
    148.41
    """
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y
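

# Usage sketch: integrate y' = y from x = 0 to x = 1 with step 0.1. The final
# value approximates e ~ 2.71828 (classic RK4 has O(h**4) global error).
def _runge_kutta_demo() -> None:
    ys = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
    print(ys[-1])  # ~ 2.71828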
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = '''microsoft/speecht5_tts'''
    description = (
        '''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '''
        '''text to read (in English) and returns a waveform object containing the sound.'''
    )
    name = '''text_reader'''
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan

    inputs = ['''text''']
    outputs = ['''audio''']

    def setup(self):
        if self.post_processor is None:
            self.post_processor = '''microsoft/speecht5_hifigan'''
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors='pt', truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('Datasets needs to be installed if not passing speaker embeddings.')

            embeddings_dataset = load_dataset('Matthijs/cmu-arctic-xvectors', split='validation')
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]['xvector']).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
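
# Hypothetical usage sketch (the class and attribute names above follow the
# transformers agents PipelineTool convention; running this downloads the TTS
# checkpoint and a speaker-embedding dataset):
#
#     tool = TextToSpeechTool()
#     waveform = tool("Hello world")  # 1-D float tensor, playable / savable audio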
| 122 | 0 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
def __len__(self ):
'''simple docstring'''
return len(self.features )
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert a single cached SquadFeatures object to model-ready tensors.
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
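
# Usage sketch (assumes a directory of SQuAD-style .json files; the checkpoint
# choice is arbitrary and only illustrative):
#
#     args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     train_dataset = SquadDataset(args, tokenizer, mode=Split.train)
#     batch_item = train_dataset[0]  # dict of input_ids / attention_mask / ... tensors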
| 102 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
    def test_small_model_tf(self):
pass
@slow
@require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
] , )
| 281 | 0 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
lowerCamelCase_ = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
lowerCamelCase_ = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
lowerCamelCase_ = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
lowerCamelCase_ = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
lowerCamelCase_ = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        outputs = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(outputs, list)
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs), [
{"sequence": "My name is John", "score": 0.0_0_8, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_0_7, "token": 1573, "token_str": " Chris"},
] , )
lowerCamelCase_ = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(lowercase ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_5_1,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_1_4,
"token": 12790,
"token_str": " Lyon",
},
] , )
lowerCamelCase_ = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(lowercase ) , [
{"sequence": "My name is Patrick", "score": 0.0_0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_0_0, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_0_0, "token": 2941, "token_str": " Te"},
] , )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase ) -> Optional[int]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
lowerCamelCase_ = FillMaskPipeline(model=lowercase , tokenizer=lowercase )
lowerCamelCase_ = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> List[str]:
lowerCamelCase_ = fill_masker.tokenizer
lowerCamelCase_ = fill_masker.model
lowerCamelCase_ = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
lowercase , [
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
] , )
lowerCamelCase_ = fill_masker([f'This is a {tokenizer.mask_token}'] )
self.assertEqual(
lowercase , [
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
] , )
lowerCamelCase_ = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'] )
self.assertEqual(
lowercase , [
[
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
],
[
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
],
] , )
with self.assertRaises(lowercase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(lowercase ):
fill_masker("This is" )
self.run_test_top_k(lowercase , lowercase )
self.run_test_targets(lowercase , lowercase )
self.run_test_top_k_targets(lowercase , lowercase )
self.fill_mask_with_duplicate_targets_and_top_k(lowercase , lowercase )
self.fill_mask_with_multiple_masks(lowercase , lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> str:
lowerCamelCase_ = tokenizer.get_vocab()
lowerCamelCase_ = sorted(vocab.keys() )[:2]
# Pipeline argument
lowerCamelCase_ = FillMaskPipeline(model=lowercase , tokenizer=lowercase , targets=lowercase )
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' )
self.assertEqual(
lowercase , [
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
] , )
lowerCamelCase_ = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase )
lowerCamelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase ) )
# Call argument
lowerCamelCase_ = FillMaskPipeline(model=lowercase , tokenizer=lowercase )
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase )
self.assertEqual(
lowercase , [
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
] , )
lowerCamelCase_ = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase )
lowerCamelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase ) )
# Score equivalence
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase )
lowerCamelCase_ = [top_mask["token_str"] for top_mask in outputs]
lowerCamelCase_ = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase ) == set(lowercase ):
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase )
lowerCamelCase_ = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowercase ) , nested_simplify(lowercase ) )
# Raises with invalid
with self.assertRaises(lowercase ):
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowercase ):
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""] )
with self.assertRaises(lowercase ):
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' , targets="" )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> int:
lowerCamelCase_ = FillMaskPipeline(model=lowercase , tokenizer=lowercase , top_k=2 )
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' )
self.assertEqual(
lowercase , [
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
] , )
lowerCamelCase_ = FillMaskPipeline(model=lowercase , tokenizer=lowercase )
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
lowercase , [
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
] , )
self.assertEqual(nested_simplify(lowercase ) , nested_simplify(lowercase ) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> Union[str, Any]:
lowerCamelCase_ = tokenizer.get_vocab()
lowerCamelCase_ = FillMaskPipeline(model=lowercase , tokenizer=lowercase )
# top_k=2, ntargets=3
lowerCamelCase_ = sorted(vocab.keys() )[:3]
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=lowercase )
        # If we use the most probable targets, and filter differently, we should still
# have the same results
lowerCamelCase_ = [el["token_str"] for el in sorted(lowercase , key=lambda lowercase : x["score"] , reverse=lowercase )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase ).issubset(lowercase ):
lowerCamelCase_ = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=lowercase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowercase ) , nested_simplify(lowercase ) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> List[str]:
lowerCamelCase_ = FillMaskPipeline(model=lowercase , tokenizer=lowercase )
lowerCamelCase_ = tokenizer.get_vocab()
# String duplicates + id duplicates
lowerCamelCase_ = sorted(vocab.keys() )[:3]
lowerCamelCase_ = [targets[0], targets[1], targets[0], targets[2], targets[1]]
lowerCamelCase_ = fill_masker(f'My name is {tokenizer.mask_token}' , targets=lowercase , top_k=10 )
        # The target list contains duplicates, so we can't return more
        # results than there are unique targets
self.assertEqual(len(lowercase ) , 3 )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> int:
lowerCamelCase_ = FillMaskPipeline(model=lowercase , tokenizer=lowercase )
lowerCamelCase_ = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
lowercase , [
[
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
],
[
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
],
[
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
{"sequence": ANY(lowercase ), "score": ANY(lowercase ), "token": ANY(lowercase ), "token_str": ANY(lowercase )},
],
] , )
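# Illustrative usage sketch (added; not part of the original test class): the
# behaviour exercised above, seen from the public `pipeline` API. The checkpoint
# name is an assumption for the example, and the first call downloads weights.
from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilroberta-base")
# A single mask yields a list of dicts with "sequence", "score", "token" and
# "token_str" keys; several masks in one input yield one such list per mask.
print(unmasker("Paris is the <mask> of France.", top_k=2))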
| 47 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
__A =logging.getLogger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Train language if it is different from the evaluation language.'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
lowerCAmelCase__ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowerCamelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_xnli" , lowerCamelCase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase__ )
datasets.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowerCamelCase_ = load_dataset(
"xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowerCamelCase_ = load_dataset(
"xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = train_dataset.features["label"].names
if training_args.do_eval:
lowerCamelCase_ = load_dataset(
"xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = eval_dataset.features["label"].names
if training_args.do_predict:
lowerCamelCase_ = load_dataset(
"xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = predict_dataset.features["label"].names
# Labels
lowerCamelCase_ = len(lowerCamelCase__ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase__ , idalabel={str(lowerCamelCase__ ): label for i, label in enumerate(lowerCamelCase__ )} , labelaid={label: i for i, label in enumerate(lowerCamelCase__ )} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
lowerCamelCase_ = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCamelCase_ = False
def preprocess_function(lowerCamelCase__ ):
# Tokenize the texts
return tokenizer(
examples["premise"] , examples["hypothesis"] , padding=lowerCamelCase__ , max_length=data_args.max_seq_length , truncation=lowerCamelCase__ , )
if training_args.do_train:
if data_args.max_train_samples is not None:
lowerCamelCase_ = min(len(lowerCamelCase__ ) , data_args.max_train_samples )
lowerCamelCase_ = train_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
lowerCamelCase_ = train_dataset.map(
lowerCamelCase__ , batched=lowerCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , )
# Log a few random samples from the training set:
for index in random.sample(range(len(lowerCamelCase__ ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowerCamelCase_ = min(len(lowerCamelCase__ ) , data_args.max_eval_samples )
lowerCamelCase_ = eval_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
lowerCamelCase_ = eval_dataset.map(
lowerCamelCase__ , batched=lowerCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
lowerCamelCase_ = min(len(lowerCamelCase__ ) , data_args.max_predict_samples )
lowerCamelCase_ = predict_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
lowerCamelCase_ = predict_dataset.map(
lowerCamelCase__ , batched=lowerCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , )
# Get the metric function
lowerCamelCase_ = evaluate.load("xnli" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary mapping metric names (strings) to floats.
def compute_metrics(lowerCamelCase__ ):
lowerCamelCase_ = p.predictions[0] if isinstance(p.predictions , lowerCamelCase__ ) else p.predictions
lowerCamelCase_ = np.argmax(lowerCamelCase__ , axis=1 )
return metric.compute(predictions=lowerCamelCase__ , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCamelCase_ = default_data_collator
elif training_args.fpaa:
lowerCamelCase_ = DataCollatorWithPadding(lowerCamelCase__ , pad_to_multiple_of=8 )
else:
lowerCamelCase_ = None
# Initialize our Trainer
lowerCamelCase_ = Trainer(
model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCamelCase__ , tokenizer=lowerCamelCase__ , data_collator=lowerCamelCase__ , )
# Training
if training_args.do_train:
lowerCamelCase_ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ = last_checkpoint
lowerCamelCase_ = trainer.train(resume_from_checkpoint=lowerCamelCase__ )
lowerCamelCase_ = train_result.metrics
lowerCamelCase_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase__ )
)
lowerCamelCase_ = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , lowerCamelCase__ )
trainer.save_metrics("train" , lowerCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
lowerCamelCase_ = trainer.evaluate(eval_dataset=lowerCamelCase__ )
lowerCamelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase__ )
lowerCamelCase_ = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
trainer.log_metrics("eval" , lowerCamelCase__ )
trainer.save_metrics("eval" , lowerCamelCase__ )
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***" )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = trainer.predict(lowerCamelCase__ , metric_key_prefix="predict" )
lowerCamelCase_ = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(lowerCamelCase__ )
)
lowerCamelCase_ = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
trainer.log_metrics("predict" , lowerCamelCase__ )
trainer.save_metrics("predict" , lowerCamelCase__ )
lowerCamelCase_ = np.argmax(lowerCamelCase__ , axis=1 )
lowerCamelCase_ = os.path.join(training_args.output_dir , "predictions.txt" )
if trainer.is_world_process_zero():
with open(lowerCamelCase__ , "w" ) as writer:
writer.write("index\tprediction\n" )
for index, item in enumerate(lowerCamelCase__ ):
lowerCamelCase_ = label_list[item]
writer.write(F'{index}\t{item}\n' )
if __name__ == "__main__":
main()
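# Example invocation (illustrative; the hyperparameter values below are
# assumptions based on the example docs, not part of this script):
#
#   python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de --train_language en \
#     --do_train --do_eval \
#     --per_device_train_batch_size 32 \
#     --learning_rate 5e-5 --num_train_epochs 2.0 \
#     --max_seq_length 128 \
#     --output_dir /tmp/debug_xnli \
#     --save_steps -1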
| 47 | 1 |
import copy
import re
class _a :
"""simple docstring"""
_lowerCamelCase : Optional[int] = 'hp'
_lowerCamelCase : str = {}
_lowerCamelCase : List[Any] = None
@classmethod
def __A ( cls : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Dict ):
A_ = prefix
A_ = defaults
cls.build_naming_info()
@staticmethod
def __A ( UpperCAmelCase : Any , UpperCAmelCase : Any ):
if len(UpperCAmelCase ) == 0:
return ""
A_ = None
if any(char.isdigit() for char in word ):
raise Exception(f'''Parameters should not contain numbers: \'{word}\' contains a number''' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(UpperCAmelCase ) + 1 ):
A_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
A_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(UpperCAmelCase : Tuple ):
A_ = ""
while integer != 0:
A_ = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
A_ = 0
while True:
A_ = word + "#" + int_to_alphabetic(UpperCAmelCase )
if sword in info["reverse_short_word"]:
continue
else:
A_ = sword
break
A_ = short_word
A_ = word
return short_word
@staticmethod
def __A ( UpperCAmelCase : int , UpperCAmelCase : Optional[int] ):
A_ = param_name.split("_" )
A_ = [TrialShortNamer.shortname_for_word(UpperCAmelCase , UpperCAmelCase ) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fall back
# to a separated short name
A_ = ["", "_"]
for separator in separators:
A_ = separator.join(UpperCAmelCase )
if shortname not in info["reverse_short_param"]:
A_ = shortname
A_ = param_name
return shortname
return param_name
@staticmethod
def __A ( UpperCAmelCase : str , UpperCAmelCase : Any ):
A_ = TrialShortNamer.shortname_for_key(UpperCAmelCase , UpperCAmelCase )
A_ = short_name
A_ = param_name
@classmethod
def __A ( cls : int ):
if cls.NAMING_INFO is not None:
return
A_ = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
A_ = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(UpperCAmelCase , UpperCAmelCase )
A_ = info
@classmethod
def __A ( cls : List[Any] , UpperCAmelCase : Optional[int] ):
cls.build_naming_info()
assert cls.PREFIX is not None
A_ = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'''You should provide a default value for the param name {k} with value {v}''' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
A_ = cls.NAMING_INFO["short_param"][k]
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = 1 if v else 0
A_ = "" if isinstance(UpperCAmelCase , (int, float) ) else "-"
A_ = f'''{key}{sep}{v}'''
name.append(UpperCAmelCase )
return "_".join(UpperCAmelCase )
@classmethod
def __A ( cls : Tuple , UpperCAmelCase : Optional[Any] ):
A_ = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
A_ = []
else:
A_ = repr.split("_" )
A_ = {}
for value in values:
if "-" in value:
A_ , A_ = value.split("-" )
else:
A_ = re.sub("[0-9.]" , "" , UpperCAmelCase )
A_ = float(re.sub("[^0-9.]" , "" , UpperCAmelCase ) )
A_ = cls.NAMING_INFO["reverse_short_param"][p_k]
A_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
A_ = cls.DEFAULTS[k]
return parameters | 312 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _a :
"""simple docstring"""
@property
def __A ( self : Union[str, Any] ):
return self.get_dummy_input()
@property
def __A ( self : int ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any]=True , UpperCAmelCase : str=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[Any]=False , ):
A_ = 4
A_ = 32
A_ = (32, 32)
A_ = torch.manual_seed(0 )
A_ = torch.device(UpperCAmelCase )
A_ = (batch_size, num_channels) + sizes
A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase )
A_ = {"hidden_states": hidden_states}
if include_temb:
A_ = 128
A_ = randn_tensor((batch_size, temb_channels) , generator=UpperCAmelCase , device=UpperCAmelCase )
if include_res_hidden_states_tuple:
A_ = torch.manual_seed(1 )
A_ = (randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ),)
if include_encoder_hidden_states:
A_ = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase )
if include_skip_sample:
A_ = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCAmelCase , device=UpperCAmelCase )
return dummy_input
def __A ( self : Optional[int] ):
A_ = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 128,
}
if self.block_type == "up":
A_ = 32
if self.block_type == "mid":
init_dict.pop("out_channels" )
A_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : List[str] , UpperCAmelCase : Optional[Any] ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
unet_block.to(UpperCAmelCase )
unet_block.eval()
with torch.no_grad():
A_ = unet_block(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
self.assertEqual(output.shape , self.output_shape )
A_ = output[0, -1, -3:, -3:]
A_ = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase )
assert torch_all_close(output_slice.flatten() , UpperCAmelCase , atol=5E-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def __A ( self : Union[str, Any] ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
A_ = model(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
A_ = torch.device(UpperCAmelCase )
A_ = randn_tensor(output.shape , device=UpperCAmelCase )
A_ = torch.nn.functional.mse_loss(UpperCAmelCase , UpperCAmelCase )
loss.backward() | 312 | 1 |
import importlib
import os
import sys
# This is required to make the module import work (when the Python process is running from the root of the repo)
sys.path.append(""".""")
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[str]:
lowerCamelCase__ : Any = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
F"""{test_file} instead.""" )
lowerCamelCase__ : Union[str, Any] = components[-1]
if not test_fn.endswith('py' ):
raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" )
if not test_fn.startswith('test_modeling_' ):
raise ValueError(
F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
lowerCamelCase__ : Optional[Any] = components[:-1] + [test_fn.replace('.py' , '' )]
lowerCamelCase__ : Optional[int] = '.'.join(a__ )
return test_module_path
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Union[str, Any]:
lowerCamelCase__ : Tuple = get_module_path(a__ )
lowerCamelCase__ : int = importlib.import_module(a__ )
return test_module
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Tuple:
lowerCamelCase__ : int = []
lowerCamelCase__ : Optional[Any] = get_test_module(a__ )
for attr in dir(a__ ):
if attr.endswith('ModelTester' ):
tester_classes.append(getattr(a__ , a__ ) )
# sort with class names
    return sorted(a__ , key=lambda x : x.__name__ )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> int:
lowerCamelCase__ : Any = []
lowerCamelCase__ : Any = get_test_module(a__ )
for attr in dir(a__ ):
lowerCamelCase__ : Any = getattr(a__ , a__ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
lowerCamelCase__ : Any = getattr(a__ , 'all_model_classes' , [] )
if len(a__ ) > 0:
test_classes.append(a__ )
# sort with class names
    return sorted(a__ , key=lambda x : x.__name__ )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Union[str, Any]:
lowerCamelCase__ : int = get_test_classes(a__ )
lowerCamelCase__ : List[Any] = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
    return sorted(a__ , key=lambda x : x.__name__ )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Optional[Any]:
lowerCamelCase__ : Optional[int] = test_class()
if hasattr(a__ , 'setUp' ):
test.setUp()
lowerCamelCase__ : int = None
if hasattr(a__ , 'model_tester' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
lowerCamelCase__ : List[str] = test.model_tester.__class__
return model_tester
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> int:
lowerCamelCase__ : Tuple = get_test_classes(a__ )
lowerCamelCase__ : Dict = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(a__ )
# sort with class names
    return sorted(a__ , key=lambda x : x.__name__ )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
lowerCamelCase__ : Optional[Any] = get_test_classes_for_model(a__ , a__ )
lowerCamelCase__ : Tuple = []
for test_class in test_classes:
lowerCamelCase__ : Tuple = get_model_tester_from_test_class(a__ )
if tester_class is not None:
tester_classes.append(a__ )
# sort with class names
    return sorted(a__ , key=lambda x : x.__name__ )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[str]:
lowerCamelCase__ : int = get_test_classes(a__ )
lowerCamelCase__ : Optional[Any] = {test_class: get_model_tester_from_test_class(a__ ) for test_class in test_classes}
return test_tester_mapping
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[Any]:
lowerCamelCase__ : Any = get_model_classes(a__ )
lowerCamelCase__ : Optional[Any] = {
model_class: get_test_classes_for_model(a__ , a__ ) for model_class in model_classes
}
return model_test_mapping
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Optional[Any]:
lowerCamelCase__ : Tuple = get_model_classes(a__ )
lowerCamelCase__ : Optional[Any] = {
model_class: get_tester_classes_for_model(a__ , a__ ) for model_class in model_classes
}
return model_to_tester_mapping
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[str]:
if isinstance(a__ , a__ ):
return o
elif isinstance(a__ , a__ ):
return o.__name__
elif isinstance(a__ , (list, tuple) ):
return [to_json(a__ ) for x in o]
elif isinstance(a__ , a__ ):
return {to_json(a__ ): to_json(a__ ) for k, v in o.items()}
else:
return o
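# Usage sketch (illustrative; the obfuscated helpers above correspond to
# `get_model_to_tester_mapping` / `to_json` in the upstream utility, so those
# names are assumed here, and the test file path must exist relative to the
# repository root for the import to succeed):
#
# mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
# print(to_json(mapping))  # e.g. {"BertModel": ["BertModelTester"], ...}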
| 359 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : List[Any] = {
"""configuration_trajectory_transformer""": [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TrajectoryTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrajectoryTransformerModel""",
"""TrajectoryTransformerPreTrainedModel""",
"""load_tf_weights_in_trajectory_transformer""",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 45 | 0 |
from __future__ import annotations
def lowerCAmelCase ( lowerCAmelCase_ ) -> Any:
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(_a ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(_a ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
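# Worked example (illustrative): for the grid [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
# the cheapest down/right path is 1 -> 3 -> 1 -> 1 -> 1, so the function
# returns 7 (note that the matrix is updated in place):
#
#   >>> lowerCAmelCase([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
#   7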
if __name__ == "__main__":
import doctest
doctest.testmod() | 262 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : List[Any] = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : List[Any] = """gpt_neox"""
def __init__( self : List[str] , lowercase_ : str=50432 , lowercase_ : List[Any]=6144 , lowercase_ : List[Any]=44 , lowercase_ : Union[str, Any]=64 , lowercase_ : List[str]=24576 , lowercase_ : List[Any]="gelu" , lowercase_ : str=0.25 , lowercase_ : Optional[int]=10000 , lowercase_ : Optional[int]=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : int=0.1 , lowercase_ : Tuple=2048 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : List[str]=1E-5 , lowercase_ : str=True , lowercase_ : str=0 , lowercase_ : Union[str, Any]=2 , lowercase_ : List[str]=False , lowercase_ : Optional[int]=True , lowercase_ : List[Any]=None , **lowercase_ : Optional[int] , ):
super().__init__(bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
snake_case_ : List[str] = vocab_size
snake_case_ : Optional[Any] = max_position_embeddings
snake_case_ : str = hidden_size
snake_case_ : Dict = num_hidden_layers
snake_case_ : Dict = num_attention_heads
snake_case_ : List[Any] = intermediate_size
snake_case_ : List[Any] = hidden_act
snake_case_ : str = rotary_pct
snake_case_ : Dict = rotary_emb_base
snake_case_ : Optional[int] = attention_dropout
snake_case_ : Tuple = hidden_dropout
snake_case_ : Tuple = classifier_dropout
snake_case_ : List[str] = initializer_range
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Any = use_cache
snake_case_ : Optional[int] = tie_word_embeddings
snake_case_ : Any = use_parallel_residual
snake_case_ : Union[str, Any] = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'''The hidden size is not divisble by the number of attention heads! Make sure to update them!''' )
def _snake_case ( self : Optional[int] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"got {self.rope_scaling}" )
snake_case_ : Any = self.rope_scaling.get('''type''' , lowercase_ )
snake_case_ : Union[str, Any] = self.rope_scaling.get('''factor''' , lowercase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 264 | 0 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
if number > 0:
raise ValueError("""input must be a negative integer""" )
snake_case : Tuple = len(bin(a__ )[3:] )
snake_case : int = bin(abs(a__ ) - (1 << binary_number_length) )[3:]
snake_case : Dict = (
(
"""1"""
+ """0""" * (binary_number_length - len(a__ ))
+ twos_complement_number
)
if number < 0
else """0"""
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowerCamelCase : int = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , *A , **A ) -> Optional[int]:
super().__init__(*A , **A )
requires_backends(self , """decord""" )
self.check_model_type(A )
def UpperCAmelCase ( self , A=None , A=None , A=None ) -> int:
snake_case : Any = {}
if frame_sampling_rate is not None:
snake_case : int = frame_sampling_rate
if num_frames is not None:
snake_case : Union[str, Any] = num_frames
snake_case : str = {}
if top_k is not None:
snake_case : Optional[int] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , A , **A ) -> Dict:
return super().__call__(A , **A )
def UpperCAmelCase ( self , A , A=None , A=1 ) -> Tuple:
if num_frames is None:
snake_case : Tuple = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
snake_case : Optional[int] = BytesIO(requests.get(A ).content )
snake_case : Optional[Any] = VideoReader(A )
videoreader.seek(0 )
snake_case : Optional[Any] = 0
snake_case : Optional[Any] = num_frames * frame_sampling_rate - 1
snake_case : Any = np.linspace(A , A , num=A , dtype=np.intaa )
snake_case : int = videoreader.get_batch(A ).asnumpy()
snake_case : List[Any] = list(A )
snake_case : Optional[Any] = self.image_processor(A , return_tensors=self.framework )
return model_inputs
def UpperCAmelCase ( self , A ) -> List[str]:
snake_case : Dict = self.model(**A )
return model_outputs
def UpperCAmelCase ( self , A , A=5 ) -> int:
if top_k > self.model.config.num_labels:
snake_case : str = self.model.config.num_labels
if self.framework == "pt":
snake_case : List[Any] = model_outputs.logits.softmax(-1 )[0]
snake_case , snake_case : Tuple = probs.topk(A )
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
snake_case : List[Any] = scores.tolist()
snake_case : str = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(A , A )]
| 176 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase : Any = logging.get_logger(__name__)
lowercase : str = {
"google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
"google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = """mobilenet_v1"""
def __init__( self , lowerCAmelCase_=3 , lowerCAmelCase_=2_24 , lowerCAmelCase_=1.0 , lowerCAmelCase_=8 , lowerCAmelCase_="relu6" , lowerCAmelCase_=True , lowerCAmelCase_=0.999 , lowerCAmelCase_=0.02 , lowerCAmelCase_=0.001 , **lowerCAmelCase_ , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
_snake_case = num_channels
_snake_case = image_size
_snake_case = depth_multiplier
_snake_case = min_depth
_snake_case = hidden_act
_snake_case = tf_padding
_snake_case = classifier_dropout_prob
_snake_case = initializer_range
_snake_case = layer_norm_eps
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = version.parse("""1.11""" )
@property
def lowerCamelCase ( self ):
"""simple docstring"""
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def lowerCamelCase ( self ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def lowerCamelCase ( self ):
"""simple docstring"""
return 1E-4
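# Usage sketch (illustrative): the two classes above are obfuscated renames of
# `MobileNetV1Config` and its ONNX config, so the public name is assumed here
# and the non-default values are examples only.
#
# from transformers import MobileNetV1Config
# config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
# config.num_channels  # 3, the default kept from the signature above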
| 42 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
snake_case_ : Dict = logging.get_logger(__name__)
class lowercase__ ( lowercase ):
def __init__( self : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : float ,**lowerCamelCase__ : int ):
'''simple docstring'''
_UpperCamelCase : List[Any] = feature_size
_UpperCamelCase : Any = sampling_rate
_UpperCamelCase : Optional[Any] = padding_value
_UpperCamelCase : Union[str, Any] = kwargs.pop('padding_side' ,'right' )
_UpperCamelCase : Dict = kwargs.pop('return_attention_mask' ,lowerCamelCase__ )
super().__init__(**lowerCamelCase__ )
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] ,lowerCamelCase__ : Union[bool, str, PaddingStrategy] = True ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,):
'''simple docstring'''
        # If we have a list of dicts, let's convert it to a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(lowerCamelCase__ ,(list, tuple) ) and isinstance(processed_features[0] ,(dict, BatchFeature) ):
_UpperCamelCase : int = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
F' to this method that includes {self.model_input_names[0]}, but you provided'
F' {list(processed_features.keys() )}' )
_UpperCamelCase : List[Any] = processed_features[self.model_input_names[0]]
_UpperCamelCase : Dict = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowerCamelCase__ ) == 0:
if return_attention_mask:
_UpperCamelCase : Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
_UpperCamelCase : List[str] = required_input[0]
if isinstance(lowerCamelCase__ ,(list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non-empty element.
_UpperCamelCase : List[str] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowerCamelCase__ ):
_UpperCamelCase : Dict = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowerCamelCase__ ):
_UpperCamelCase : Any = 'tf'
elif is_torch_tensor(lowerCamelCase__ ):
_UpperCamelCase : Optional[int] = 'pt'
elif isinstance(lowerCamelCase__ ,(int, float, list, tuple, np.ndarray) ):
_UpperCamelCase : int = 'np'
else:
raise ValueError(
F'type of {first_element} unknown: {type(lowerCamelCase__ )}. '
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] ,(int, float) ):
_UpperCamelCase : Any = to_numpy(lowerCamelCase__ )
else:
_UpperCamelCase : Any = [to_numpy(lowerCamelCase__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
_UpperCamelCase : Optional[int] = self._get_padding_strategies(padding=lowerCamelCase__ ,max_length=lowerCamelCase__ )
_UpperCamelCase : str = processed_features[self.model_input_names[0]]
_UpperCamelCase : List[str] = len(lowerCamelCase__ )
if not all(len(lowerCamelCase__ ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
_UpperCamelCase : List[str] = []
for i in range(lowerCamelCase__ ):
_UpperCamelCase : List[str] = {k: v[i] for k, v in processed_features.items()}
# truncation
_UpperCamelCase : List[str] = self._truncate(
lowerCamelCase__ ,max_length=lowerCamelCase__ ,pad_to_multiple_of=lowerCamelCase__ ,truncation=lowerCamelCase__ ,)
truncated_inputs.append(lowerCamelCase__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
_UpperCamelCase : Union[str, Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
_UpperCamelCase : Any = PaddingStrategy.MAX_LENGTH
_UpperCamelCase : Optional[Any] = {}
for i in range(lowerCamelCase__ ):
# padding
_UpperCamelCase : Any = self._pad(
truncated_inputs[i] ,max_length=lowerCamelCase__ ,padding_strategy=lowerCamelCase__ ,pad_to_multiple_of=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,)
for key, value in outputs.items():
if key not in batch_outputs:
_UpperCamelCase : Dict = []
if value.dtype is np.dtype(np.floataa ):
_UpperCamelCase : Any = value.astype(np.floataa )
batch_outputs[key].append(lowerCamelCase__ )
return BatchFeature(lowerCamelCase__ ,tensor_type=lowerCamelCase__ )
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : Union[Dict[str, np.ndarray], BatchFeature] ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[bool] = None ,):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
_UpperCamelCase : Optional[Any] = len(lowerCamelCase__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_UpperCamelCase : str = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_UpperCamelCase : str = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
_UpperCamelCase : Tuple = np.ones(len(lowerCamelCase__ ) ,dtype=np.intaa )
if needs_to_be_padded:
_UpperCamelCase : Dict = max_length - len(lowerCamelCase__ )
if self.padding_side == "right":
if return_attention_mask:
_UpperCamelCase : Optional[int] = np.pad(
processed_features['attention_mask'] ,(0, difference) )
_UpperCamelCase : Union[str, Any] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
_UpperCamelCase : List[Any] = np.pad(
lowerCamelCase__ ,lowerCamelCase__ ,'constant' ,constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
_UpperCamelCase : List[Any] = np.pad(
processed_features['attention_mask'] ,(difference, 0) )
_UpperCamelCase : List[Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
_UpperCamelCase : List[str] = np.pad(
lowerCamelCase__ ,lowerCamelCase__ ,'constant' ,constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : Union[Dict[str, np.ndarray], BatchFeature] ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[bool] = None ,):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
_UpperCamelCase : int = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_UpperCamelCase : Optional[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_UpperCamelCase : Optional[int] = len(lowerCamelCase__ ) > max_length
if needs_to_be_truncated:
_UpperCamelCase : Dict = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
_UpperCamelCase : Optional[Any] = processed_features['attention_mask'][:max_length]
return processed_features
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : int=False ,lowerCamelCase__ : Optional[Any]=None ):
'''simple docstring'''
# Get padding strategy
if padding is not False:
if padding is True:
_UpperCamelCase : Optional[Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : Tuple = PaddingStrategy(lowerCamelCase__ )
elif isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : Union[str, Any] = padding
else:
_UpperCamelCase : List[Any] = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
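# Usage sketch (illustrative): exercising `pad` through a concrete subclass;
# `Wav2Vec2FeatureExtractor` is one such subclass in the library, and the
# argument values below are assumptions for the example.
#
# from transformers import Wav2Vec2FeatureExtractor
# fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
# batch = fe.pad({"input_values": [[0.1, 0.2, 0.3], [0.4]]},
#                padding="longest", return_tensors="np")
# # batch["input_values"].shape == (2, 3); the short input is right-padded with 0.0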
| 83 | 0 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SCREAMING_SNAKE_CASE_ = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE_ = logging.getLogger()
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
_UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("-f" )
_UpperCAmelCase : int = parser.parse_args()
return args.f
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int , lowerCAmelCase: Dict="eval" ) -> Union[str, Any]:
_UpperCAmelCase : Dict = os.path.join(lowerCAmelCase , F'{split}_results.json' )
if os.path.exists(lowerCAmelCase ):
with open(lowerCAmelCase , "r" ) as f:
return json.load(lowerCAmelCase )
raise ValueError(F'can\'t find {path}' )
SCREAMING_SNAKE_CASE_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class a ( UpperCAmelCase ):
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : Tuple = f'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(A_ , "argv" , A_ ):
run_flax_glue.main()
_UpperCAmelCase : Union[str, Any] = get_results(A_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : Optional[Any] = f'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(A_ , "argv" , A_ ):
run_clm_flax.main()
_UpperCAmelCase : Any = get_results(A_ )
self.assertLess(result["eval_perplexity"] , 100 )
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Dict = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : List[Any] = f'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split()
with patch.object(A_ , "argv" , A_ ):
run_summarization_flax.main()
_UpperCAmelCase : Optional[Any] = get_results(A_ , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 10 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : Optional[Any] = f'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split()
with patch.object(A_ , "argv" , A_ ):
run_mlm_flax.main()
_UpperCAmelCase : Tuple = get_results(A_ )
self.assertLess(result["eval_perplexity"] , 42 )
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Dict = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : Union[str, Any] = f'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(A_ , "argv" , A_ ):
run_ta_mlm_flax.main()
_UpperCAmelCase : List[Any] = get_results(A_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Tuple = 7 if get_gpu_count() > 1 else 2
_UpperCAmelCase : int = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : Tuple = f'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split()
with patch.object(A_ , "argv" , A_ ):
run_flax_ner.main()
_UpperCAmelCase : Tuple = get_results(A_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[str] = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : str = f'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split()
with patch.object(A_ , "argv" , A_ ):
run_qa.main()
_UpperCAmelCase : Union[str, Any] = get_results(A_ )
self.assertGreaterEqual(result["eval_f1"] , 30 )
self.assertGreaterEqual(result["eval_exact"] , 30 )
| 367 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class a ( unittest.TestCase ):
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = "ZinengTang/tvlt-base"
_UpperCAmelCase : int = tempfile.mkdtemp()
def _UpperCAmelCase ( self , **A_ ):
'''simple docstring'''
return TvltImageProcessor.from_pretrained(self.checkpoint , **A_ )
def _UpperCAmelCase ( self , **A_ ):
'''simple docstring'''
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Dict = self.get_image_processor()
_UpperCAmelCase : Optional[int] = self.get_feature_extractor()
_UpperCAmelCase : Tuple = TvltProcessor(image_processor=A_ , feature_extractor=A_ )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : str = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , A_ )
self.assertIsInstance(processor.image_processor , A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.get_image_processor()
_UpperCAmelCase : int = self.get_feature_extractor()
_UpperCAmelCase : Tuple = TvltProcessor(image_processor=A_ , feature_extractor=A_ )
_UpperCAmelCase : List[str] = np.ones([12000] )
_UpperCAmelCase : int = feature_extractor(A_ , return_tensors="np" )
_UpperCAmelCase : int = processor(audio=A_ , return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[str] = self.get_image_processor()
_UpperCAmelCase : int = self.get_feature_extractor()
_UpperCAmelCase : Optional[Any] = TvltProcessor(image_processor=A_ , feature_extractor=A_ )
_UpperCAmelCase : Union[str, Any] = np.ones([3, 224, 224] )
_UpperCAmelCase : Tuple = image_processor(A_ , return_tensors="np" )
_UpperCAmelCase : List[str] = processor(images=A_ , return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[str] = self.get_image_processor()
_UpperCAmelCase : Any = self.get_feature_extractor()
_UpperCAmelCase : Dict = TvltProcessor(image_processor=A_ , feature_extractor=A_ )
_UpperCAmelCase : str = np.ones([12000] )
_UpperCAmelCase : Optional[Any] = np.ones([3, 224, 224] )
_UpperCAmelCase : List[Any] = processor(audio=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.get_image_processor()
_UpperCAmelCase : int = self.get_feature_extractor()
_UpperCAmelCase : str = TvltProcessor(image_processor=A_ , feature_extractor=A_ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
| 189 | 0 |
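# The tests above exercise the standard processor composition pattern: the
# processor routes `images=` to its image processor and `audio=` to its feature
# extractor and merges the two output dicts. A hedged usage sketch (the model
# id and the array shapes are copied from the test itself, not verified against
# a live checkpoint download):
import numpy as np
from transformers import TvltProcessor

processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
inputs = processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]), return_tensors="np")
print(sorted(inputs.keys()))  # ['audio_mask', 'audio_values', 'pixel_mask', 'pixel_values']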
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__lowerCAmelCase : Union[str, Any] = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}." )
    if not ops[op](version.parse(got_ver), version.parse(want_ver) ):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}" )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}" )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys() )}, but got {op}")
    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}" )
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
| 107 |
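# A worked example of what the parser above produces. The requirement string is
# illustrative only:
#   require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")
# splits into pkg = "tokenizers" and wanted = {">=": "0.11.1", "!=": "0.11.3", "<": "0.13"},
# and each (op, want_ver) pair is then compared against importlib.metadata.version(pkg):
from packaging import version

assert version.parse("0.11.5") >= version.parse("0.11.1")
assert version.parse("0.11.5") != version.parse("0.11.3")
assert version.parse("0.11.5") < version.parse("0.13")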
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 | 0 |
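# The `_LazyModule` pattern above defers the heavy framework imports until an
# attribute is actually accessed. A minimal self-contained sketch of the same
# idea, independent of transformers (class and attribute names here are
# illustrative, not the library's internals):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # only reached when the attribute has not been loaded yet
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the real import runs only once
        return value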
import argparse
import os
import jax
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder( weights , model ):
_lowercase : Any = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
_lowercase : str = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
_lowercase : int = weights[F'''layers_{lyr_num}''']
_lowercase : int = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
_lowercase : List[Any] = ly_weight['attention']
_lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
_lowercase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
_lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
_lowercase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
_lowercase : Dict = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
_lowercase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
_lowercase : Dict = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
_lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
_lowercase : int = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder( weights , model ):
_lowercase : Dict = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
_lowercase : Union[str, Any] = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
_lowercase : str = weights[F'''layers_{lyr_num}''']
_lowercase : Tuple = ly_weight['attention']
_lowercase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
_lowercase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
_lowercase : int = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
_lowercase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
_lowercase : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
_lowercase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
_lowercase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
_lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
_lowercase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
_lowercase : Dict = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder( weights , model ):
_lowercase : Any = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
_lowercase : str = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
_lowercase : Tuple = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
_lowercase : int = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
_lowercase : Union[str, Any] = weights[F'''layers_{lyr_num}''']
_lowercase : Dict = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
_lowercase : Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
_lowercase : int = ly_weight['self_attention']
_lowercase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
_lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
_lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
_lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
_lowercase : Optional[int] = ly_weight['MultiHeadDotProductAttention_0']
_lowercase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
_lowercase : int = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
_lowercase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
_lowercase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
_lowercase : int = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
_lowercase : str = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
_lowercase : int = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
_lowercase : Any = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
_lowercase : str = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
_lowercase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
_lowercase : str = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
_lowercase : List[Any] = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main( args ):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    ta_checkpoint = jax.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]
    gin_file = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint['target']['decoder'] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=F"{MODEL}/checkpoint_500000",
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
main(args)
| 363 |
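# The recurring pattern in the converter above deserves a note: Flax/T5X stores
# dense-layer kernels as (in_features, out_features) while torch.nn.Linear
# stores `weight` as (out_features, in_features), which is why every kernel is
# transposed with `.T` before being wrapped in nn.Parameter. A minimal sketch
# of that single porting step (shapes are illustrative):
import numpy as np
import torch
import torch.nn as nn

flax_kernel = np.ones((16, 32), dtype=np.float32)  # (in, out) layout in Flax
linear = nn.Linear(16, 32, bias=False)
linear.weight = nn.Parameter(torch.from_numpy(flax_kernel.T))  # (out, in) layout in torch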
def solution( n = 1000 ) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 84 | 0 |
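# The closed form above appears to be Project Euler problem 120 ("square
# remainders"): for a >= 3 the maximum of ((a-1)^n + (a+1)^n) mod a^2 over n
# is 2*a*floor((a-1)/2). A brute-force cross-check for small a; scanning n up
# to 2*a is assumed sufficient because the odd-n residues 2*n*a mod a^2 are
# periodic in n:
def r_max_brute(a: int) -> int:
    return max(
        (pow(a - 1, n, a * a) + pow(a + 1, n, a * a)) % (a * a)
        for n in range(1, 2 * a + 1)
    )

assert all(r_max_brute(a) == 2 * a * ((a - 1) // 2) for a in range(3, 50))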
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
_UpperCamelCase = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 208 | """simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
_a : Optional[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 44 | 0 |
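# Both copies above print the articulation points of the sample graph. For the
# adjacency list given, the cut vertices are 2, 3 and 5: removing 2 separates
# {0, 1} from the rest, removing 3 isolates 4, and removing 5 detaches the
# cycle {6, 7, 8}. A quick independent check with networkx (assuming it is
# installed):
import networkx as nx

g = nx.Graph(
    {0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]}
)
print(sorted(nx.articulation_points(g)))  # [2, 3, 5]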
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
def __init__( self: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: List[str]=13 , UpperCamelCase_: str=30 , UpperCamelCase_: Tuple=2 , UpperCamelCase_: Any=3 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Union[str, Any]=32 , UpperCamelCase_: int=5 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: List[Any]=37 , UpperCamelCase_: Union[str, Any]="gelu" , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: int=0.1 , UpperCamelCase_: List[Any]=10 , UpperCamelCase_: Any=0.02 , UpperCamelCase_: str=3 , UpperCamelCase_: List[Any]=0.6 , UpperCamelCase_: Union[str, Any]=None , ) -> Any:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = mask_ratio
lowercase__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCamelCase_ ( self: str ) -> str:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Tuple ) -> int:
"""simple docstring"""
lowercase__ = ViTMAEModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[int] ) -> str:
"""simple docstring"""
lowercase__ = ViTMAEForPreTraining(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ )
lowercase__ = (self.image_size // self.patch_size) ** 2
lowercase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ = 1
lowercase__ = ViTMAEForPreTraining(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(UpperCamelCase_ )
lowercase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ = config_and_inputs
lowercase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class ViTMAEModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_lowercase : Dict = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_lowercase : Union[str, Any] = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
_lowercase : Union[str, Any] = False
_lowercase : Optional[int] = False
_lowercase : int = False
_lowercase : Any = False
def lowerCamelCase_ ( self: List[str] ) -> int:
"""simple docstring"""
lowercase__ = ViTMAEModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def lowerCamelCase_ ( self: Tuple ) -> List[str]:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def lowerCamelCase_ ( self: Optional[Any] ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] ) -> Any:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int ) -> Any:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = torch.from_numpy(UpperCamelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ = pt_noise
super().check_pt_tf_models(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowercase__ = outputs[0].cpu().numpy()
lowercase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase_ )
lowercase__ = model_class.from_pretrained(UpperCamelCase_ )
model.to(UpperCamelCase_ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
# Make sure we don't have nans
lowercase__ = after_outputs[0].cpu().numpy()
lowercase__ = 0
lowercase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase_ , 1E-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.''' )
def lowerCamelCase_ ( self: List[Any] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.''' )
def lowerCamelCase_ ( self: Optional[int] ) -> int:
"""simple docstring"""
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.''' )
def lowerCamelCase_ ( self: Tuple ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCamelCase_ ( self: str ) -> Optional[int]:
"""simple docstring"""
pass
@slow
def lowerCamelCase_ ( self: Optional[Any] ) -> int:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = ViTMAEModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def prepare_img():
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Tuple ) -> str:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(UpperCamelCase_ )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ = ViTMAEConfig()
lowercase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
lowercase__ = model(**UpperCamelCase_ , noise=torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ ) )
# verify the logits
lowercase__ = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
lowercase__ = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCamelCase_ ) , atol=1E-4 ) )
| 351 |
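# A note on the reproducibility trick the tests above lean on: ViTMAE samples a
# fresh random patch mask inside every forward pass, so comparing two runs only
# works if the randomness is pinned down, either by reseeding torch right
# before each forward (as the save/load test does) or by passing an explicit
# `noise` tensor (as the integration test does). A hedged sketch of the second
# approach with an untrained config; shapes follow the config defaults:
import numpy as np
import torch
from transformers import ViTMAEConfig, ViTMAEForPreTraining

config = ViTMAEConfig()
model = ViTMAEForPreTraining(config).eval()
num_patches = (config.image_size // config.patch_size) ** 2
noise = torch.from_numpy(np.random.uniform(size=(1, num_patches)).astype("float32"))
pixel_values = torch.zeros(1, 3, config.image_size, config.image_size)
with torch.no_grad():
    out_a = model(pixel_values, noise=noise)
    out_b = model(pixel_values, noise=noise)
assert torch.equal(out_a.logits, out_b.logits)  # same noise, same mask, same output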
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create( cls , common: CommonSchedulerState , init_noise_sigma: jnp.ndarray , timesteps: jnp.ndarray ):
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state( self ):
        return True

    @register_to_config
    def __init__( self , num_train_timesteps: int = 1_000 , beta_start: float = 0.0001 , beta_end: float = 0.02 , beta_schedule: str = "linear" , trained_betas: Optional[jnp.ndarray] = None , variance_type: str = "fixed_small" , clip_sample: bool = True , prediction_type: str = "epsilon" , dtype: jnp.dtype = jnp.float32 , ):
        self.dtype = dtype

    def create_state( self , common: Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
        timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )

    def scale_model_input( self , state: DDPMSchedulerState , sample: jnp.ndarray , timestep: Optional[int] = None ) -> jnp.ndarray:
        return sample

    def set_timesteps( self , state: DDPMSchedulerState , num_inference_steps: int , shape: Tuple = () ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )

    def _get_variance( self , state: DDPMSchedulerState , t , predicted_variance=None , variance_type=None ):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance , a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance , a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance

    def step( self , state: DDPMSchedulerState , model_output: jnp.ndarray , timestep: int , sample: jnp.ndarray , key: Optional[jax.random.KeyArray] = None , return_dict: bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output , predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                ''' for the FlaxDDPMScheduler.''' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key , num=1 )
            noise = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(state , t , predicted_variance=predicted_variance ) ** 0.5) * noise
        variance = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )

    def add_noise( self , state: DDPMSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray , ) -> jnp.ndarray:
        return add_noise_common(state.common , original_samples , noise , timesteps )

    def get_velocity( self , state: DDPMSchedulerState , sample: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray , ) -> jnp.ndarray:
        return get_velocity_common(state.common , sample , noise , timesteps )

    def __len__( self ):
        return self.config.num_train_timesteps
| 93 | 0 |
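# The heart of `step` above is the DDPM posterior mean, formula (7) of
# https://arxiv.org/pdf/2006.11239.pdf:
#   mean(x_{t-1}) = sqrt(abar_{t-1}) * beta_t / (1 - abar_t) * x_0
#                 + sqrt(alpha_t) * (1 - abar_{t-1}) / (1 - abar_t) * x_t
# where abar_t is the cumulative product of the alphas. A tiny numpy sketch of
# just those two coefficients (the linear beta schedule is illustrative):
import numpy as np

betas = np.linspace(1e-4, 0.02, 1000)
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas)

def posterior_mean(x0, xt, t):
    a_t = alphas_cumprod[t]
    a_prev = alphas_cumprod[t - 1] if t > 0 else 1.0
    coef_x0 = np.sqrt(a_prev) * betas[t] / (1.0 - a_t)
    coef_xt = np.sqrt(alphas[t]) * (1.0 - a_prev) / (1.0 - a_t)
    return coef_x0 * x0 + coef_xt * xt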
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_snake_case = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 283 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
_UpperCAmelCase : Optional[int] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self : Any , A : List[str] , A : Tuple , A : List[str] ) ->List[Any]:
lowerCamelCase__ : List[str] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowerCamelCase__ : Union[str, Any] = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def __lowerCamelCase ( self : List[Any] , A : Optional[int] , A : Tuple ) ->Optional[Any]:
lowerCamelCase__ : str = object_detector(examples[0] , threshold=0.0 )
lowerCamelCase__ : Union[str, Any] = len(A )
self.assertGreater(A , 0 )
self.assertEqual(
A , [
{
'''score''': ANY(A ),
'''label''': ANY(A ),
'''box''': {'''xmin''': ANY(A ), '''ymin''': ANY(A ), '''xmax''': ANY(A ), '''ymax''': ANY(A )},
}
for i in range(A )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __lowerCamelCase ( self : Dict ) ->List[Any]:
pass
@require_torch
def __lowerCamelCase ( self : Optional[Any] ) ->List[Any]:
lowerCamelCase__ : Optional[int] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowerCamelCase__ : List[Any] = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
] , )
lowerCamelCase__ : str = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
[
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
]
] , )
@require_torch
@slow
def __lowerCamelCase ( self : Union[str, Any] ) ->Optional[Any]:
lowerCamelCase__ : Tuple = pipeline('''zero-shot-object-detection''' )
lowerCamelCase__ : str = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
] , )
lowerCamelCase__ : List[Any] = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __lowerCamelCase ( self : int ) ->Union[str, Any]:
pass
@require_torch
@slow
def __lowerCamelCase ( self : Optional[int] ) ->Optional[int]:
lowerCamelCase__ : Optional[Any] = 0.2
lowerCamelCase__ : List[Any] = pipeline('''zero-shot-object-detection''' )
lowerCamelCase__ : Any = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=A , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
] , )
@require_torch
@slow
def __lowerCamelCase ( self : Any ) ->str:
lowerCamelCase__ : List[Any] = 2
lowerCamelCase__ : Union[str, Any] = pipeline('''zero-shot-object-detection''' )
lowerCamelCase__ : List[str] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=A , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
] , )
| 142 | 0 |
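# A hedged minimal usage of the pipeline exercised above. The checkpoint that
# the bare "zero-shot-object-detection" alias resolves to can change between
# transformers releases, so the scores and boxes should be treated as
# illustrative rather than fixed values:
from transformers import pipeline

detector = pipeline("zero-shot-object-detection")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote", "couch"],
    threshold=0.2,
)
for p in predictions:
    print(p["label"], round(p["score"], 4), p["box"])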
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : List[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Any = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
_lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 99 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('''config''' , description=description )
    else:
        parser = argparse.ArgumentParser('''Accelerate config command''' , description=description )
    parser.add_argument(
        '''--config_file''' , default=None , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser


def config_command( args ):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('''.json''' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(f'''accelerate configuration saved at {config_file}''' )


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
| 99 | 1 |
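# Once accelerate is installed, the parser above backs the `accelerate config`
# subcommand; a typical invocation (the config path is illustrative) is:
#   accelerate config --config_file ~/.cache/huggingface/accelerate/default_config.yaml
# Running it without --config_file writes to `default_yaml_config_file`
# instead, creating the cache directory on first use.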
"""simple docstring"""
def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j] , end=""" """ )
        print(""" """ )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr , n , r , index + 1 , data , i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr , n , r , index , data , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store one combination at a time
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr , n , r , 0 , data , 0 )
if __name__ == "__main__":
# Driver code to check the function above
UpperCAmelCase : int = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 115 |
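# The recursion above reproduces what the standard library already provides; an
# equivalent check using itertools on the same driver data:
from itertools import combinations

for combo in combinations([10, 20, 30, 40, 50], 3):
    print(*combo)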
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash( tokens : List[str] ) -> Optional[MinHash]:
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens( code : str ) -> Set[str]:
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex:
    def __init__( self , * , duplication_jaccard_threshold: float = 0.85 , ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add( self , code_key , min_hash ):
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(f'''Duplicate key {code_key}''' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self ):
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save( self , filepath ):
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , """w""" ) as f:
            json.dump(duplicate_clusters , f )
def lowerCamelCase__ ( a__ : Optional[int] ) -> List[str]:
UpperCamelCase_ , UpperCamelCase_ = element
UpperCamelCase_ = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def lowerCamelCase__ ( a__ : Type[Dataset] ) -> Optional[Any]:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(a__ , max_queue_size=1_0000 ) , chunksize=100 , ):
if data is not None:
yield data
def lowerCamelCase__ ( a__ : Type[Dataset] , a__ : float ) -> List[Any]:
UpperCamelCase_ = DuplicationIndex(duplication_jaccard_threshold=a__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(a__ ) ) , max_queue_size=100 ) ):
di.add(a__ , a__ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def lowerCamelCase__ ( a__ : str , a__ : str ) -> float:
UpperCamelCase_ = get_tokens(a__ )
UpperCamelCase_ = get_tokens(a__ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_A = None
def lowerCamelCase__ ( a__ : str , a__ : str ) -> Optional[Any]:
UpperCamelCase_ = []
for elementa in cluster:
UpperCamelCase_ = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
UpperCamelCase_ = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(a__ , a__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
UpperCamelCase_ = 1
extremes.append(a__ )
return extremes
def lowerCamelCase__ ( a__ : str , a__ : Optional[int] , a__ : Optional[int] ) -> str:
global _shared_dataset
UpperCamelCase_ = dataset
UpperCamelCase_ = []
UpperCamelCase_ = partial(_find_cluster_extremes_shared , jaccard_threshold=a__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
a__ , a__ , ) , total=len(a__ ) , ):
extremes_list.append(a__ )
return extremes_list
def lowerCamelCase__ ( a__ : Type[Dataset] , a__ : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
UpperCamelCase_ = make_duplicate_clusters(a__ , a__ )
UpperCamelCase_ = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
UpperCamelCase_ = {}
UpperCamelCase_ = find_extremes(a__ , a__ , a__ )
for extremes in extremes_clusters:
for element in extremes:
UpperCamelCase_ = element
UpperCamelCase_ = duplicate_indices - set(extreme_dict.keys() )
UpperCamelCase_ = dataset.filter(lambda a__ , a__ : idx not in remove_indices , with_indices=a__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
UpperCamelCase_ = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
UpperCamelCase_ = extreme_dict[element["""base_index"""]]["""copies"""]
print(f'''Original dataset size: {len(a__ )}''' )
print(f'''Number of duplicate clusters: {len(a__ )}''' )
print(f'''Files in duplicate cluster: {len(a__ )}''' )
print(f'''Unique files in duplicate cluster: {len(a__ )}''' )
print(f'''Filtered dataset size: {len(a__ )}''' )
return ds_filter, duplicate_clusters
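# Illustrative usage sketch, not part of the original script: running the
# near-deduplication pipeline end to end. It assumes a Hugging Face
# `datasets.Dataset` with the "content", "repo_name" and "path" columns that
# `_compute_min_hash` reads, and that the module's helpers (get_min_hash,
# NON_ALPHA, ...) are in scope. Note that very short files may be skipped by
# get_min_hash's minimum-token check, in which case no clusters are reported.
if __name__ == "__main__":
    from datasets import Dataset

    toy = Dataset.from_dict(
        {
            "content": ["def f():\n    return 1", "def f():\n    return 1", "print('hi')"],
            "repo_name": ["repo_a", "repo_b", "repo_c"],
            "path": ["a.py", "b.py", "c.py"],
        }
    )
    filtered, clusters = deduplicate_dataset(toy, jaccard_threshold=0.85)
    print(f"kept {len(filtered)} of {len(toy)} files; clusters: {clusters}")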
| 122 | 0 |
'''simple docstring'''
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(n) for n in a))


if __name__ == "__main__":
    main()
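# Quick property check for the sort above (illustrative, not part of the
# original file): pigeonhole sort must agree with Python's built-in sorted()
# on random integer input. Call _check_pigeonhole_sort() manually to run it.
def _check_pigeonhole_sort(trials: int = 100) -> None:
    import random

    for _ in range(trials):
        data = [random.randint(-50, 50) for _ in range(random.randint(1, 40))]
        expected = sorted(data)
        pigeonhole_sort(data)  # sorts the list in place
        assert data == expected, (data, expected)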
| 352 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    '''simple docstring'''

    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=3, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1_000, ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
@slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values, )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
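# Illustrative sketch, not part of the test file: how real LayoutLMv3 inputs
# are built. Each text token gets a [x0, y0, x1, y1] bounding box normalized to
# a 0-1000 scale, and the page image is passed separately as pixel_values. The
# model's sequence length is text tokens + image patches + 1 CLS token, which
# is where the 199 in the integration test above comes from
# (2 text tokens + 196 patches + 1).
def build_layoutlmv3_inputs(processor, image, words, boxes):
    # `processor` is assumed to be a transformers LayoutLMv3Processor created
    # with apply_ocr=False, so words and boxes are supplied by the caller.
    return processor(image, words, boxes=boxes, return_tensors="pt")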
| 280 | 0 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"openbmb/cpm-ant-10b": 1_0_2_4,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs, ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs, )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def _tokenize(self, text):
        """Tokenize a string."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens
    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)
    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
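# Illustrative toy run of the greedy longest-match-first WordPiece loop above
# (not part of the original file); the vocab below is made up for the example.
def _demo_wordpiece() -> None:
    toy_vocab = {"un": 0, "happy": 1, "unhappy": 2, "<unk>": 3}
    wp = WordpieceTokenizer(vocab=toy_vocab, unk_token="<unk>")
    assert wp.tokenize("unhappy") == ["unhappy"]  # whole word found in one match
    assert wp.tokenize("unhappyx") == ["unhappy", "<unk>"]  # trailing char is unknown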
| 47 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."})
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."})
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )
    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        }, )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"})
    config_overrides: Optional[str] = field(
        default=None, metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        }, )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"})
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."})
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."})
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."})
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome.")
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ])

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=ds["train"] if training_args.do_train else None, eval_dataset=ds["validation"] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn, )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
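# Worked example of the absolute learning-rate scaling used in main() above
# (illustrative, not part of the original script):
#     absolute_lr = base_learning_rate * total_train_batch_size / 256
# With the default base_learning_rate of 1e-3, a per-device batch of 64,
# 2 gradient-accumulation steps and 2 processes:
#     total_train_batch_size = 64 * 2 * 2 = 256
#     learning_rate          = 1e-3 * 256 / 256 = 1e-3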
| 47 | 1 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
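# Illustrative examples of comments the regexes above are designed to match
# (the object paths below are made up for the example, not taken from diffusers):
#
#   # Copied from diffusers.models.attention.Attention.forward
#   # Copied from diffusers.models.unet_2d.UNet2DModel with UNet2D->UNet3D
#
# `_re_copy_warning` captures the indentation, the dotted object name, and the
# optional replace pattern; `_re_replace_pattern` then parses each `old->new`
# replacement (optionally followed by `all-casing`).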
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """Check if the code commented as a copy in `filename` matches the original;
    return the differences or overwrite the content depending on `overwrite`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 31 |
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long string of 0s and 1s."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
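# Illustrative check of the byte-to-bit-string formatting used above
# (not part of the original file): each byte becomes 8 zero-padded bits.
assert f"{65:08b}" == "01000001"  # ASCII 'A'
assert f"{1:08b}" == "00000001"   # zero-padding keeps the width fixed at 8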
def decompress_data(data_bits: str) -> str:
    """Decompresses the given bit string using the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given 0/1 string into a file, padded to whole bytes."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Removes the size prefix that a compressed file carries."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Reads the source file, decompresses it and writes the result to the destination file."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 31 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
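# Illustrative usage sketch, not part of the original module: loading the fast
# BLOOM tokenizer from the Hub. `bigscience/bloom-560m` is one of the
# checkpoints listed in the vocab map above; kept commented since it downloads.
#
#   from transformers import BloomTokenizerFast
#   tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   ids = tokenizer("Hello world")["input_ids"]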
| 53 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
'''simple docstring'''
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
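# Illustrative sketch of what pad_across_processes does in the block above
# (not part of the original file): each rank holds a tensor whose first
# dimension is process_index + 2, so ranks disagree on length. Before any
# cross-process gather, every tensor is padded with 0 up to the longest one
# (num_processes + 1); pad_first=True pads on the left instead. With 2 ranks:
#
#   rank 0: [a, b]     -> [a, b, 0]   (pad_first=False)
#   rank 1: [c, d, e]  -> [c, d, e]   (already the longest)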
| 45 | 0 |
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
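# Worked example for the amortization formula above (illustrative, not part of
# the original file): borrowing 25,000 at 8% per annum over 10 years gives a
# monthly rate of 0.08 / 12 and 120 monthly payments, i.e. an EMI of roughly
# 303.32. Call _demo_emi() manually to verify.
def _demo_emi() -> None:
    emi = equated_monthly_installments(25_000, 0.08, 10)
    assert abs(emi - 303.32) < 0.01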
| 367 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 158 | 0 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
snake_case_ : Tuple = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.")
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
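# Illustrative usage of the decorator factory above (not part of the original
# file): wrap a forward function so it runs eagerly, or as an optionally
# XLA-compiled tf.function graph, depending on the benchmark arguments.
@run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
def _example_forward(x):
    return x * 2  # stands in for a model forward pass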
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run an additional 5 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 51 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_tokens)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        ids = tokenizer.encode(input_text)
        output_text = tokenizer.decode(ids)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        text_1 = tokenizer.decode(tokens_1)
        text_2 = tokenizer.decode(tokens_2)
        text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(text_1, expected_text)
        self.assertEqual(text_2, expected_text)
        self.assertEqual(text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2

        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)

        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_3[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
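
# Standalone sketch of the prefix-input behaviour exercised above (hedged: needs
# network access to download the Tanrei/GPTSAN-japanese checkpoint):
# tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
# enc = tokenizer("こんばんは、㔺界。", prefix_text="こんにちは、世界。")
# # token_type_ids: 1 marks the bidirectional prefix part, 0 the generated part
# print(enc.input_ids, enc.token_type_ids)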
| 176 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
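
# Standalone sketch of the scheduler-switching pattern from `test_switch` above
# (hedged: needs network access; the checkpoint id below is illustrative):
# from diffusers import DiffusionPipeline
# pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# pipe.scheduler = DEISMultistepScheduler.from_config(pipe.scheduler.config)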
| 32 | """simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
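
# `rreplace` replaces only the LAST occurrence, which the `.w` / `.b` suffix
# renaming above depends on. Illustrative check (not part of the script):
# rreplace("blocks.w.res_path.w", ".w", ".weight", 1) == "blocks.w.res_path.weight"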
| 32 | 1 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """
    Convert a torch image to a PIL image.
    """
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """
    Convert a numpy image or a batch of images to a PIL image.
    """
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
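
if __name__ == "__main__":
    # Minimal sanity check for the converters above (assumes torch is installed;
    # values are illustrative, not from the library):
    import torch

    fake_batch = torch.rand(4, 3, 64, 64) * 2 - 1  # stand-in for model output in [-1, 1]
    pil_images = pt_to_pil(fake_batch)
    assert len(pil_images) == 4 and pil_images[0].size == (64, 64)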
| 177 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer norm which only scales and doesn't shift (a variance-only
        # RMS norm), so the variance is computed without subtracting the mean.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
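
# Hedged numerical check for the RMS-style norm above (it scales but never
# shifts); illustrative only, run manually:
# ln = T5LayerNorm(4)
# x = torch.randn(2, 3, 4)
# manual = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + ln.variance_epsilon)
# assert torch.allclose(ln(x), ln.weight * manual, atol=1e-5)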
class NewGELUActivation(nn.Module):
    """
    Implementation of the GELU activation function as used in the Google BERT repo
    (identical to OpenAI GPT). See the Gaussian Error Linear Units paper:
    https://arxiv.org/abs/1606.08415
    """

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class T5FiLMLayer(nn.Module):
    """
    T5-style FiLM layer: predicts a per-feature scale and shift from the conditioning embedding.
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
| 189 | 0 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
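
# Standalone sketch of the slow integration check above (hedged: downloads the
# microsoft/deberta-v2-xlarge checkpoint; nothing here is asserted):
# model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge").eval()
# input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
# with torch.no_grad():
#     hidden = model(input_ids)[0]  # shape: (1, 11, hidden_size)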
| 361 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
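
# Sanity check for the property above: the ratio is the product of the conv
# strides, so the default conv_stride=(5, 2, 2, 2, 2, 2, 2) yields one logit
# frame per 5 * 2**6 = 320 input samples (illustrative, run manually):
# assert UniSpeechSatConfig().inputs_to_logits_ratio == 320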
| 257 | 0 |
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Error (hypothesis output minus actual output) for the given example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Linear hypothesis value; parameter_vector[0] is the bias term."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Actual output value of the example in the chosen data set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value of the example in the chosen data set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum of errors over the training set, weighted by feature `index`
    (index == -1 corresponds to the bias parameter)."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Partial derivative of the cost function w.r.t. parameter `index + 1`."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
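A design note: the per-parameter loop above can be collapsed into one matrix update. A minimal NumPy sketch of the same batch rule on the same data (the variable names here are illustrative and independent of the file above):

import numpy as np

x = np.array([[5, 2, 3], [6, 5, 9], [11, 12, 13], [1, 1, 1], [11, 12, 13]])
y = np.array([15, 25, 41, 8, 41])
x_b = np.hstack([np.ones((len(x), 1)), x])  # prepend a bias column
theta = np.array([2.0, 4.0, 1.0, 5.0])
learning_rate = 0.009

for _ in range(10_000):
    # Gradient of the mean-squared-error cost, all parameters at once.
    gradient = x_b.T @ (x_b @ theta - y) / len(y)
    new_theta = theta - learning_rate * gradient
    if np.allclose(theta, new_theta, atol=2e-6, rtol=0):
        break
    theta = new_theta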
| 127 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
@pytest.fixture(scope="""session""" )
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
__UpperCAmelCase = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : str ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
lowerCAmelCase_ :List[Any] = FILE_CONTENT
with open(lowercase__ , """w""" ) as f:
f.write(lowercase__ )
return filename
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[Any] ) -> Tuple:
'''simple docstring'''
    import bz2
lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" )
    with bz2.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[Any] ) -> Dict:
'''simple docstring'''
import gzip
lowerCAmelCase_ :int = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" )
with gzip.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict ) -> Optional[int]:
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
lowerCAmelCase_ :List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
lowerCAmelCase_ :int = bytes(lowercase__ , """utf-8""" )
        with lz4.frame.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict , lowercase__ : Optional[int] ) -> Any:
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
        with py7zr.SevenZipFile(lowercase__ , """w""" ) as archive:
archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
import tarfile
lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(lowercase__ , """w""" ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> str:
'''simple docstring'''
import lzma
lowerCAmelCase_ :Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
lowerCAmelCase_ :Optional[Any] = bytes(lowercase__ , """utf-8""" )
with lzma.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] ) -> Any:
'''simple docstring'''
import zipfile
lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : int ) -> Tuple:
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
lowerCAmelCase_ :Any = bytes(lowercase__ , """utf-8""" )
with zstd.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] ) -> str:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
lowerCAmelCase_ :Any = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(lowercase__ , """w""" ) as f:
f.write(lowercase__ )
return filename
__UpperCAmelCase = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__UpperCAmelCase = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__UpperCAmelCase = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
__UpperCAmelCase = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__UpperCAmelCase = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="""session""" )
def _snake_case ( ) -> Union[str, Any]:
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : int ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Tuple = datasets.Dataset.from_dict(lowercase__ )
lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : int ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
    with contextlib.closing(sqlite3.connect(lowercase__ ) ) as con:
lowerCAmelCase_ :Union[str, Any] = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> int:
'''simple docstring'''
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(lowercase__ , """w""" , newline="""""" ) as f:
lowerCAmelCase_ :Optional[int] = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(lowercase__ , """w""" , newline="""""" ) as f:
lowerCAmelCase_ :Dict = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : str , lowercase__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
    import bz2
lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(lowercase__ , """rb""" ) as f:
lowerCAmelCase_ :Union[str, Any] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Any ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(lowercase__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : str ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
lowerCAmelCase_ :Optional[Any] = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(lowercase__ , """wb""" ) as f:
lowerCAmelCase_ :Optional[int] = pq.ParquetWriter(lowercase__ , schema=lowercase__ )
lowerCAmelCase_ :List[str] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ )
writer.write_table(lowercase__ )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
lowerCAmelCase_ :Union[str, Any] = {"""data""": DATA}
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : str ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
lowerCAmelCase_ :Optional[Any] = {"""data""": DATA_DICT_OF_LISTS}
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : int , lowercase__ : Dict ) -> Optional[int]:
'''simple docstring'''
import gzip
lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(lowercase__ , """rb""" ) as orig_file:
with gzip.open(lowercase__ , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> Any:
'''simple docstring'''
import gzip
lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(lowercase__ , """rb""" ) as orig_file:
with gzip.open(lowercase__ , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : List[Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[str] ) -> int:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : List[str] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(lowercase__ , """w""" ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict , lowercase__ : str , lowercase__ : List[str] , lowercase__ : int ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(lowercase__ , """w""" ) as f:
f.add(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :str = ["""0""", """1""", """2""", """3"""]
lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(lowercase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :int = ["""0""", """1""", """2""", """3"""]
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(lowercase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :Dict = ["""0""", """1""", """2""", """3"""]
lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(lowercase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : str , lowercase__ : int ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : List[str] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(lowercase__ , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( ) -> int:
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def _snake_case ( ) -> Tuple:
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any , lowercase__ : Tuple ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
return data_dir
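A usage note for the fixtures above: pytest injects session-scoped fixtures by function name, so a test only has to declare the fixture as a parameter. A short hypothetical test against the `dataset` fixture at the top of this file (the assertion values follow how it is constructed):

def test_dataset_fixture(dataset):
    # Session scope means the dataset is built once and shared by all tests.
    assert len(dataset) == 10
    assert dataset.features["id"].dtype == "int64"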
| 84 | 0 |
def dodecahedron_surface_area(edge: float) -> float:
    """Surface area of a regular dodecahedron: 3 * sqrt(25 + 10*sqrt(5)) * edge^2."""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Volume of a regular dodecahedron: (15 + 7*sqrt(5)) / 4 * edge^3."""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
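For reference, a quick sanity check of both closed forms at edge length 2:

# surface area = 3 * sqrt(25 + 10*sqrt(5)) * 4  ~ 82.583
# volume       = (15 + 7*sqrt(5)) / 4 * 8       ~ 61.305
print(dodecahedron_surface_area(2))  # 82.5828...
print(dodecahedron_volume(2))        # 61.3049...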
| 148 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert a single feature to tensors and assemble the model inputs.
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
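A minimal usage sketch of the class above, assuming a directory containing SQuAD-formatted .json files (the paths here are illustrative, not part of this module):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad_data")
train_dataset = SquadDataset(args, tokenizer=tokenizer, mode=Split.train)
batch = train_dataset[0]  # dict of tensors: input_ids, attention_mask, ...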
| 148 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure["modeling_data2vec_text"] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure["modeling_data2vec_vision"] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
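A sketch of what the _LazyModule wiring above buys you: importing the package is cheap, and the heavy torch-backed submodule is only loaded when an attribute is first touched (the import path below assumes the standard transformers layout):

import transformers.models.data2vec as data2vec  # fast, nothing heavy yet

# First attribute access triggers the real import of modeling_data2vec_text.
model_cls = data2vec.Data2VecTextModel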
| 43 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Prime numbers below max_number, via the sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid-integers p^q * q^p (p < q prime) not exceeding base^degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 93 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
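Why test_full_tokenizer expects ids [14, 15, 20]: in the toy merges file, "l o" (rank 123) builds "lo" and "lo w" (rank 1456) builds "low", but no merge covers the rest of "lower", so it falls back to the word-final unit "er</w>"; positions in the vocab list above then give the ids. A standalone restatement of that mapping:

vocab_ids = {"low": 14, "er</w>": 15, "<unk>": 20}
assert [vocab_ids[t] for t in ["low", "er</w>", "<unk>"]] == [14, 15, 20]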
| 360 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 186 | 0 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
lowercase : Any = TypeVar("""KT""")
lowercase : Any = TypeVar("""VT""")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references from this node."""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Random level from [1, self.max_level]; higher levels are geometrically less likely."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []
        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main():
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
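A note on the defaults p = 0.5 and max_level = 16: with promotion probability p, a node reaches level k with probability p^(k - 1), so level counts decay geometrically and expected search cost is O(log n); 16 levels comfortably cover around 2^16 keys. A small empirical check of that distribution:

from collections import Counter

sl = SkipList()
for i in range(1024):
    sl.insert(i, i)

node = sl.head
levels = Counter()
while node.forward:
    node = node.forward[0]
    levels[node.level] += 1
print(sorted(levels.items()))  # counts roughly halve at each successive level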
| 99 |
from math import log2


def lowest_set_bit_index(number: int) -> int:
    """Index (0-based) of the lowest set bit, via the identity number & -number."""
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (number == 0) else int(log2(number & -number))
if __name__ == "__main__":
import doctest
doctest.testmod()
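Why number & -number isolates the lowest set bit: in two's complement, negation flips every bit above the lowest 1 and leaves that 1 in place, so the AND keeps exactly that bit. A quick check:

n = 0b101000              # 40
assert n & -n == 0b1000   # 8, the lowest set bit
assert int(log2(n & -n)) == 3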
| 99 | 1 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class lowercase__ :
def __init__( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str]=13 , UpperCamelCase__ : List[Any]=7 , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : int=True , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[int]=33 , UpperCamelCase__ : Optional[int]=32 , UpperCamelCase__ : int=5 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : Dict=37 , UpperCamelCase__ : Union[str, Any]="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : Tuple=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Tuple=3 , UpperCamelCase__ : List[str]=4 , UpperCamelCase__ : Optional[int]=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : Dict = seq_length
SCREAMING_SNAKE_CASE : List[str] = is_training
SCREAMING_SNAKE_CASE : Union[str, Any] = use_input_mask
SCREAMING_SNAKE_CASE : List[str] = use_token_type_ids
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : List[str] = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Any = num_labels
SCREAMING_SNAKE_CASE : List[str] = num_choices
SCREAMING_SNAKE_CASE : List[str] = scope
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : str ):
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __A ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = EsmModel(config=__snake_case )
model.to(__snake_case )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(__snake_case , attention_mask=__snake_case )
SCREAMING_SNAKE_CASE : List[str] = model(__snake_case )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = EsmForMaskedLM(config=__snake_case )
model.to(__snake_case )
model.eval()
SCREAMING_SNAKE_CASE : str = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.num_labels
SCREAMING_SNAKE_CASE : Optional[Any] = EsmForTokenClassification(config=__snake_case )
model.to(__snake_case )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowercase__ ( A__ , A__ , unittest.TestCase):
UpperCamelCase_ = False
UpperCamelCase_ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase_ = ()
UpperCamelCase_ = (
{
"""feature-extraction""": EsmModel,
"""fill-mask""": EsmForMaskedLM,
"""text-classification""": EsmForSequenceClassification,
"""token-classification""": EsmForTokenClassification,
"""zero-shot""": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ = True
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = EsmModelTester(self )
SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def __A ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : Optional[Any] = type
self.model_tester.create_and_check_model(*__snake_case )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__snake_case )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__snake_case )
@slow
def __A ( self : int ):
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Union[str, Any] = EsmModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()[0]
SCREAMING_SNAKE_CASE : str = EsmEmbeddings(config=__snake_case )
SCREAMING_SNAKE_CASE : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
SCREAMING_SNAKE_CASE : List[str] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
SCREAMING_SNAKE_CASE : Union[str, Any] = create_position_ids_from_input_ids(__snake_case , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__snake_case , __snake_case ) ) )
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = EsmEmbeddings(config=__snake_case )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.empty(2 , 4 , 30 )
SCREAMING_SNAKE_CASE : str = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
SCREAMING_SNAKE_CASE : Optional[int] = torch.as_tensor([expected_single_positions, expected_single_positions] )
SCREAMING_SNAKE_CASE : int = embeddings.create_position_ids_from_inputs_embeds(__snake_case )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__snake_case , __snake_case ) ) )
@unittest.skip('''Esm does not support embedding resizing''' )
def __A ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def __A ( self : int ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self : Optional[int] ):
'''simple docstring'''
pass
@require_torch
class lowercase__ ( A__):
@slow
def __A ( self : Optional[Any] ):
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = EsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE : Any = model(__snake_case )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = 33
SCREAMING_SNAKE_CASE : List[Any] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __snake_case )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __snake_case , atol=1E-4 ) )
@slow
def __A ( self : str ):
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = EsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
model.eval()
SCREAMING_SNAKE_CASE : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(__snake_case )[0]
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __snake_case , atol=1E-4 ) )
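# Standalone illustration of the padding-aware position ids exercised by the
# tests above (a sketch; the helper lives in transformers' ESM modeling module):
import torch
from transformers.models.esm.modeling_esm import create_position_ids_from_input_ids

example_ids = torch.as_tensor([[12, 31, 13, 1]])  # here 1 plays the role of the padding idx
print(create_position_ids_from_input_ids(example_ids, padding_idx=1))
# tensor([[2, 3, 4, 1]]): real tokens count up from padding_idx + 1, pads keep padding_idx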
| 353 | import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling( data ):
    return (data["data"], data["target"])
def xgboost( features , target , test_features ):
    # Fit a gradient-boosted regressor and predict the target for the test data
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main( ):
    data = fetch_california_housing()
    data_input , data_output = data_handling(data )
    x_train , x_test , y_train , y_test = train_test_split(
        data_input , data_output , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(f"""Mean Absolute Error : {mean_absolute_error(y_test , predictions )}""" )
    print(f"""Mean Square Error : {mean_squared_error(y_test , predictions )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
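# A minimal self-contained sketch of the same fit/predict/score flow on
# synthetic data, so it can be tried without downloading the housing dataset
# (call _synthetic_demo() manually to run it):
def _synthetic_demo():
    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 3))
    y = X @ np.array([1.5, -2.0, 0.5]) + rng.normal(scale=0.1, size=200)
    x_tr, x_te, y_tr, y_te = train_test_split(X, y, test_size=0.25, random_state=1)
    preds = xgboost(x_tr, y_tr, x_te)
    print(f"Synthetic MAE: {mean_absolute_error(y_te, preds):.4f}")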
| 258 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput( BaseOutput ):
    latents: torch.FloatTensor  # field name recovered from the encode() call below
class VQModel( ModelMixin , ConfigMixin ):  # class name as in diffusers' public API
@register_to_config
    def __init__( self , in_channels = 3 , out_channels = 3 , down_block_types = ("DownEncoderBlock2D",) , up_block_types = ("UpDecoderBlock2D",) , block_out_channels = (64,) , layers_per_block = 1 , act_fn = "silu" , latent_channels = 3 , sample_size = 32 , num_vq_embeddings = 256 , norm_num_groups = 32 , vq_embed_dim = None , scaling_factor = 0.18215 , norm_type = "group" , ):
        '''simple docstring'''
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
@apply_forward_hook
    def encode( self , x , return_dict = True ):
        '''simple docstring'''
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
@apply_forward_hook
    def decode( self , h , force_not_quantize = False , return_dict = True ):
        '''simple docstring'''
        if not force_not_quantize:
            # the vector quantizer returns (quantized latents, commitment loss, info)
            quant , _ , _ = self.quantize(h )
        else:
            quant = h
        quant2 = self.post_quant_conv(quant )
        dec = self.decoder(quant2 , quant if self.config.norm_type == 'spatial' else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample , return_dict = True ):
        '''simple docstring'''
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
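# Quick roundtrip sketch with the restored class above (randomly initialized
# weights; shapes follow the single-block default config):
vq_demo = VQModel()
vq_demo.eval()
with torch.no_grad():
    demo_latents = vq_demo.encode(torch.randn(1, 3, 32, 32) ).latents
    demo_rec = vq_demo.decode(demo_latents ).sample
print(demo_latents.shape , demo_rec.shape )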
| 205 |
def bubble_sort( list_data , length = 0 ) -> list:
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    # each pass bubbles the current maximum to the end; recurse on the shorter prefix
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
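    # Example calls for the restored function above (illustrative):
    print(bubble_sort([5, 1, 4, 2, 8]))  # [1, 2, 4, 5, 8]
    print(bubble_sort([3, 3, -1]))  # [-1, 3, 3]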
| 280 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput( ModelOutput ):
    # field names recovered from the keyword construction in forward() below
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig( XLMRobertaConfig ):
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=512 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation( RobertaPreTrainedModel ):  # class name as in diffusers' AltDiffusion module
    _keys_to_ignore_on_load_unexpected = [R'''pooler''', R'''logit_scale''']
    _keys_to_ignore_on_load_missing = [R'''position_ids''', R'''predictions.decoder.bias''']
    base_model_prefix = '''roberta'''
    config_class = RobertaSeriesConfig
    def __init__( self , config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size , config.project_dim)
        self.has_pre_transformation = getattr(config , """has_pre_transformation""" , False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps)
        self.post_init()
    def forward( self , input_ids = None , attention_mask = None , token_type_ids = None , position_ids = None , head_mask = None , inputs_embeds = None , encoder_hidden_states = None , encoder_attention_mask = None , output_attentions = None , output_hidden_states = None , return_dict = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            sequence_output = outputs["""hidden_states"""][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) | 283 |
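# Smoke test of the restored classes (random weights, tiny config, no checkpoint
# download; the model class name follows diffusers' AltDiffusion implementation):
demo_cfg = RobertaSeriesConfig(
    project_dim=32 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=37 , vocab_size=100 )
demo_model = RobertaSeriesModelWithTransformation(demo_cfg)
demo_out = demo_model(input_ids=torch.tensor([[0, 31, 42, 2]]))
print(demo_out.projection_state.shape)  # torch.Size([1, 4, 32])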
'''simple docstring'''
def solution( n = 4_000_000 ):
    # Build Fibonacci numbers up to (and one past) n, then sum the even ones.
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""") | 283 | 1 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class lowerCamelCase_ (snake_case__ , unittest.TestCase ):
'''simple docstring'''
    model_class = FlaxAutoencoderKL
@property
    def dummy_input( self ):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
| 31 | '''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
def UpperCamelCase_ ( _UpperCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : int = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
_UpperCAmelCase : List[Any] = MaskFormerConfig(backbone_config=_UpperCAmelCase )
_UpperCAmelCase : Tuple = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
_UpperCAmelCase : Dict = 847
_UpperCAmelCase : Any = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
_UpperCAmelCase : Any = 150
_UpperCAmelCase : Any = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
_UpperCAmelCase : Tuple = 171
_UpperCAmelCase : Union[str, Any] = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
_UpperCAmelCase : Any = 133
_UpperCAmelCase : int = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
_UpperCAmelCase : Optional[int] = 19
_UpperCAmelCase : str = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
_UpperCAmelCase : Optional[int] = 65
_UpperCAmelCase : Tuple = "mapillary-vistas-id2label.json"
_UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type="dataset" ) , "r" ) )
    _UpperCAmelCase : Tuple = {int(k ): v for k, v in idalabel.items()}
return config
def UpperCamelCase_ ( _UpperCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def UpperCamelCase_ ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = dct.pop(_UpperCAmelCase )
_UpperCAmelCase : List[str] = val
def UpperCamelCase_ ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : List[str] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_UpperCAmelCase : Optional[int] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_UpperCAmelCase : Any = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_UpperCAmelCase : Optional[int] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : List[str] = in_proj_weight[:dim, :]
_UpperCAmelCase : Tuple = in_proj_bias[: dim]
_UpperCAmelCase : List[Any] = in_proj_weight[
dim : dim * 2, :
]
_UpperCAmelCase : List[str] = in_proj_bias[
dim : dim * 2
]
_UpperCAmelCase : Optional[Any] = in_proj_weight[
-dim :, :
]
_UpperCAmelCase : Dict = in_proj_bias[-dim :]
# fmt: on
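# Tiny numeric sketch of the fused-qkv split performed above: a (3*dim, dim)
# input-projection matrix is sliced row-wise into equal query, key and value blocks.
demo_dim = 4
demo_w = torch.arange(3 * demo_dim * demo_dim, dtype=torch.float32).reshape(3 * demo_dim, demo_dim)
demo_q, demo_k, demo_v = demo_w[:demo_dim, :], demo_w[demo_dim : demo_dim * 2, :], demo_w[-demo_dim:, :]
assert torch.equal(torch.cat([demo_q, demo_k, demo_v]), demo_w)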
def UpperCamelCase_ ( _UpperCAmelCase : Dict , _UpperCAmelCase : str ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_UpperCAmelCase : Dict = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
_UpperCAmelCase : Dict = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : int = in_proj_weight[: hidden_size, :]
_UpperCAmelCase : Union[str, Any] = in_proj_bias[:config.hidden_size]
_UpperCAmelCase : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
_UpperCAmelCase : List[str] = in_proj_bias[hidden_size : hidden_size * 2]
_UpperCAmelCase : int = in_proj_weight[-hidden_size :, :]
_UpperCAmelCase : Optional[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_UpperCAmelCase : Optional[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
_UpperCAmelCase : Tuple = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : Any = in_proj_weight[: hidden_size, :]
_UpperCAmelCase : Tuple = in_proj_bias[:config.hidden_size]
_UpperCAmelCase : Dict = in_proj_weight[hidden_size : hidden_size * 2, :]
_UpperCAmelCase : Dict = in_proj_bias[hidden_size : hidden_size * 2]
_UpperCAmelCase : Optional[int] = in_proj_weight[-hidden_size :, :]
_UpperCAmelCase : Union[str, Any] = in_proj_bias[-hidden_size :]
def UpperCamelCase_ ( ) -> torch.Tensor:
"""simple docstring"""
_UpperCAmelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase : Any = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCamelCase_ ( _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : bool = False ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = get_maskformer_config(_UpperCAmelCase )
# load original state_dict
with open(_UpperCAmelCase , "rb" ) as f:
_UpperCAmelCase : Optional[int] = pickle.load(_UpperCAmelCase )
_UpperCAmelCase : Optional[int] = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_UpperCAmelCase : Any = create_rename_keys(_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
read_in_swin_q_k_v(_UpperCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(_UpperCAmelCase , _UpperCAmelCase )
# update to torch tensors
for key, value in state_dict.items():
_UpperCAmelCase : Tuple = torch.from_numpy(_UpperCAmelCase )
# load 🤗 model
_UpperCAmelCase : Union[str, Any] = MaskFormerForInstanceSegmentation(_UpperCAmelCase )
model.eval()
for name, param in model.named_parameters():
        print(name , param.shape )
_UpperCAmelCase , _UpperCAmelCase : Any = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
    assert len(unexpected_keys ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
_UpperCAmelCase : Optional[int] = prepare_img()
if "vistas" in model_name:
_UpperCAmelCase : int = 65
elif "cityscapes" in model_name:
_UpperCAmelCase : Tuple = 65_535
else:
_UpperCAmelCase : Any = 255
_UpperCAmelCase : Optional[Any] = True if "ade" in model_name else False
_UpperCAmelCase : Optional[int] = MaskFormerImageProcessor(ignore_index=_UpperCAmelCase , reduce_labels=_UpperCAmelCase )
_UpperCAmelCase : Optional[int] = image_processor(_UpperCAmelCase , return_tensors="pt" )
_UpperCAmelCase : List[Any] = model(**_UpperCAmelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_UpperCAmelCase : Tuple = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
image_processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
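    # Hypothetical invocation (script filename and local paths assumed):
    #
    #   python convert_maskformer_checkpoint.py \
    #       --model_name maskformer-swin-tiny-ade \
    #       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
    #       --pytorch_dump_folder_path ./converted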
| 31 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
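# What the lazy structure buys: importing the package stays cheap, and each
# heavy submodule is materialized only on first attribute access. A quick sketch:
#
#   from transformers.models import distilbert
#   config = distilbert.DistilBertConfig()  # triggers the real import here
#   print(config.model_type)  # "distilbert"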
| 359 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
@staticmethod
def lowercase__ ( *snake_case__ , **snake_case__ ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : str = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
lowerCAmelCase : Dict = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = object_detector(examples[0] , threshold=0.0 )
lowerCAmelCase : Dict = len(snake_case__ )
self.assertGreater(snake_case__ , 0 )
self.assertEqual(
snake_case__ , [
{
"score": ANY(snake_case__ ),
"label": ANY(snake_case__ ),
"box": {"xmin": ANY(snake_case__ ), "ymin": ANY(snake_case__ ), "xmax": ANY(snake_case__ ), "ymax": ANY(snake_case__ )},
}
for i in range(snake_case__ )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
lowerCAmelCase : Tuple = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
] , )
lowerCAmelCase : Optional[Any] = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
] , )
@require_torch
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = pipeline("zero-shot-object-detection" )
lowerCAmelCase : Dict = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
] , )
lowerCAmelCase : Dict = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@require_torch
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = 0.2
lowerCAmelCase : List[Any] = pipeline("zero-shot-object-detection" )
lowerCAmelCase : Union[str, Any] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case__ , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
] , )
@require_torch
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = 2
lowerCAmelCase : Any = pipeline("zero-shot-object-detection" )
lowerCAmelCase : Any = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case__ , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
] , )
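# Minimal standalone use of the pipeline exercised above (downloads the
# default zero-shot detection checkpoint on first run, so kept as a sketch):
#
#   from transformers import pipeline
#
#   detector = pipeline("zero-shot-object-detection")
#   for det in detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote", "couch"],
#       threshold=0.2,
#   ):
#       print(f"{det['label']}: {det['score']:.3f} at {det['box']}")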
| 133 | 0 |
import os
from pathlib import Path
def write_model_card( model_card_dir , src_lang , tgt_lang , model_name ):
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
A__ = f"{src_lang}-{tgt_lang}"
A__ = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
    readme = A__  # the rendered card text from the long f-string above (its target name was mangled)
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path = os.path.join(model_card_dir , '''README.md''' )
    print(f"Generating {path}" )
    with open(path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
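# Sketch: render a single card into a throwaway directory instead of the repo tree:
import tempfile

with tempfile.TemporaryDirectory() as tmp_dir:
    write_model_card(
        Path(tmp_dir) / 'allenai' / 'wmt16-en-de-12-1',
        src_lang='en', tgt_lang='de', model_name='wmt16-en-de-12-1',
    )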
| 193 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Optional[Any] = ["pixel_values"]
def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BILINEAR , _lowerCAmelCase = True , _lowerCAmelCase = 1 / 255 , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = True , **_lowerCAmelCase , ) -> None:
super().__init__(**_lowerCAmelCase )
_lowerCAmelCase = size if size is not None else {"shortest_edge": 224}
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
_lowerCAmelCase = crop_size if crop_size is not None else {"height": 256, "width": 256}
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" )
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = resample
_lowerCAmelCase = do_rescale
_lowerCAmelCase = rescale_factor
_lowerCAmelCase = do_center_crop
_lowerCAmelCase = crop_size
_lowerCAmelCase = do_flip_channel_order
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PIL.Image.BILINEAR , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray:
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' )
_lowerCAmelCase = get_resize_output_image_size(_lowerCAmelCase , size=size["shortest_edge"] , default_to_square=_lowerCAmelCase )
return resize(_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray:
_lowerCAmelCase = get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
return center_crop(_lowerCAmelCase , size=(size["height"], size["width"]) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> Optional[Any]:
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> np.ndarray:
return flip_channel_order(_lowerCAmelCase , data_format=_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ) -> PIL.Image.Image:
_lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase = resample if resample is not None else self.resample
_lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
_lowerCAmelCase = size if size is not None else self.size
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
_lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" )
_lowerCAmelCase = make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
_lowerCAmelCase = [to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
_lowerCAmelCase = [self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_center_crop:
_lowerCAmelCase = [self.center_crop(image=_lowerCAmelCase , size=_lowerCAmelCase ) for image in images]
if do_rescale:
_lowerCAmelCase = [self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
_lowerCAmelCase = [self.flip_channel_order(image=_lowerCAmelCase ) for image in images]
_lowerCAmelCase = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
_lowerCAmelCase = {"pixel_values": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> int:
_lowerCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(_lowerCAmelCase ):
_lowerCAmelCase = target_sizes.numpy()
_lowerCAmelCase = []
for idx in range(len(_lowerCAmelCase ) ):
_lowerCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=_lowerCAmelCase )
_lowerCAmelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowerCAmelCase )
else:
_lowerCAmelCase = logits.argmax(dim=1 )
_lowerCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
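# Hedged usage sketch: the class above mirrors transformers'
# MobileViTImageProcessor (note the final RGB -> BGR channel flip), so usage
# would look like this; shapes assume the defaults above:
#
#   import numpy as np
#   from transformers import MobileViTImageProcessor
#
#   processor = MobileViTImageProcessor()
#   image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
#   batch = processor(image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 256, 256) after resize + center crop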
| 158 | 0 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp( self ):
'''simple docstring'''
super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE = """unwanted, running"""
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowerCamelCase__ ,["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,[9, 6, 7, 12, 10, 11] )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
# With lower casing
SCREAMING_SNAKE_CASE = self.get_tokenizer(do_lower_case=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer(do_lower_case=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual(
            [tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 193 |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
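# Added illustration (hypothetical file name, not part of the original script):
# a results dict such as
#     {"benchmarks/benchmark_map.json": {"load_time": {"new": 1.23, "old": 1.5, "diff": -0.27}}}
# renders as
#     ### Benchmark: benchmark_map.json
#     | metric | load_time |
#     |--------|---|
#     | new / old (diff) | 1.230000 / 1.500000 (-0.270000) |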
| 193 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
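if __name__ == "__main__":
    # Added usage sketch (not part of the original module): the defaults above
    # describe a 4-stage hierarchical encoder.
    config = GLPNConfig()
    assert config.num_encoder_blocks == 4
    assert config.hidden_sizes == [32, 64, 160, 256]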
| 32 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
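if __name__ == "__main__":
    # Added shape check (a sketch, not part of the original module):
    # nearest-neighbour upsampling of a (1, 8, 8, 4) NHWC tensor doubles H and W.
    upsample = FlaxUpsample2D(out_channels=4)
    x = jnp.zeros((1, 8, 8, 4))
    params = upsample.init(jax.random.PRNGKey(0), x)
    out = upsample.apply(params, x)
    assert out.shape == (1, 16, 16, 4)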
| 32 | 1 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 358 |
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation."""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
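# Added cross-check (not in the original module): the manual 3-bit grouping
# agrees with Python's built-in conversion, e.g. oct(int("111100", 2)) == "0o74".
assert bin_to_octal("111100") == "74"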
| 191 | 0 |
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into a sequence of one or more `words`."""
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
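# Added usage examples (not in the original file): classic word-break cases.
assert word_break("applepenapple", ["apple", "pen"]) is True
assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False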
| 337 |
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
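# Added check (not in the original file): 28 is perfect (1 + 2 + 4 + 7 + 14 == 28)
# while 27 (divisors 1 + 3 + 9 == 13) is not.
assert perfect(28) and not perfect(27)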
| 257 | 0 |
"""simple docstring"""
def is_balanced(s: str) -> bool:
    """Return True if every opening bracket in `s` is closed in the right order."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
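# Added examples (not in the original file): brackets must close in order.
assert is_balanced("{[()]}")
assert not is_balanced("{[(])}")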
def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
main() | 317 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
assert np.abs((expected_image - image).max() ) < 1e-1 | 317 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
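# Worked example (added sketch, not in the original file): solving for the
# force between two 4 m^2 plates 30 mm apart leaves "force" as the computed key.
assert "force" in casimir_force(force=0, area=4.0, distance=0.03)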
| 148 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 148 | 1 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 358 | '''simple docstring'''
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using union by rank; return True on success."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the parent of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
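if __name__ == "__main__":
    # Added usage sketch (not part of the original file): three singleton sets
    # merged into one set of size 3.
    ds = DisjointSet([1, 1, 1])
    assert ds.merge(1, 2) and ds.merge(0, 2)
    assert ds.max_set == 3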
| 21 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # Computes the expected height and width when providing images to the
        # image processor, assuming do_resize is set with a "shortest_edge" size.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 148 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
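# A minimal, self-contained sketch of the deferred-import idea _LazyModule relies
# on (illustrative only, not the transformers implementation): resolve a module
# only at the moment one of its attributes is actually needed.
import importlib


def _lazy_get(module_name, attr):
    """Import `module_name` only at lookup time, then fetch `attr` from it."""
    return getattr(importlib.import_module(module_name), attr)


# e.g. _lazy_get("json", "dumps") imports json on first use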
| 186 | 0 |
"""simple docstring"""
from functools import reduce
_A = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n=_A):
    """Return the greatest product of thirteen adjacent digits in the series."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 205 |
"""simple docstring"""
def solution(power=1000):
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for digit in list_num:
        sum_of_num += int(digit)
    return sum_of_num


if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
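    # Quick sanity check (hand-computed): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
    assert solution(15) == 26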
| 205 | 1 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_DESCRIPTION = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_KWARGS_DESCRIPTION = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def __a ( UpperCAmelCase , UpperCAmelCase ) ->List[str]:
"""simple docstring"""
return float((preds == labels).mean() )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="binary" ) ->Any:
"""simple docstring"""
A = simple_accuracy(UpperCAmelCase , UpperCAmelCase )
A = float(fa_score(y_true=UpperCAmelCase , y_pred=UpperCAmelCase , average=UpperCAmelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
A = {}
for id_pred, label in zip(UpperCAmelCase , UpperCAmelCase ):
A = f"""{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}"""
A = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
A = [(pred, label)]
A , A = [], []
for question, preds_labels in question_map.items():
A , A = zip(*UpperCAmelCase )
A = fa_score(y_true=UpperCAmelCase , y_pred=UpperCAmelCase , average="""macro""" )
fas.append(UpperCAmelCase )
A = int(sum(pred == label for pred, label in preds_labels ) == len(UpperCAmelCase ) )
ems.append(UpperCAmelCase )
A = float(sum(UpperCAmelCase ) / len(UpperCAmelCase ) )
A = sum(UpperCAmelCase ) / len(UpperCAmelCase )
A = float(fa_score(y_true=UpperCAmelCase , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    """qas""": [
                        {"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 258 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __a ( UpperCAmelCase ) ->Tuple: # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def __a ( UpperCAmelCase ) ->int: # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class A:
    """simple docstring"""

    x: int
    y: str
class PyUtilsTest(TestCase):
'''simple docstring'''
def A (self : Tuple ):
A = {}
A = []
A = 1
A = [1, 2]
A = {"""a""": 1, """b""": 2}
A = {"""a""": [1, 2], """b""": [3, 4]}
A = {"""a""": {"""1""": 1}, """b""": 2}
A = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
A = {}
A = []
A = 2
A = [2, 3]
A = {"""a""": 2, """b""": 3}
A = {"""a""": [2, 3], """b""": [4, 5]}
A = {"""a""": {"""1""": 2}, """b""": 3}
A = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
A = 2
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
A = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
A = {"""a""": 2, """b""": 0, """c""": 2}
A = {
"""a""": np.eye(2 ).astype(_lowerCAmelCase ),
"""b""": np.zeros(3 ).astype(_lowerCAmelCase ),
"""c""": np.ones(2 ).astype(_lowerCAmelCase ),
}
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_lowerCAmelCase ): # can't pickle a local lambda
map_nested(lambda _lowerCAmelCase : x + 1 , _lowerCAmelCase , num_proc=_lowerCAmelCase )
def A (self : List[Any] ):
A = {"""a""": 1, """b""": 2}
A = {"""a""": 3, """b""": 4}
A = {"""a""": 5, """b""": 6}
A = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ) , _lowerCAmelCase )
def A (self : Union[str, Any] ):
        class Foo:
            '''simple docstring'''

            my_attr = "bar"

        foo = Foo()
self.assertEqual(foo.my_attr , """bar""" )
with temporary_assignment(_lowerCAmelCase , """my_attr""" , """BAR""" ):
self.assertEqual(foo.my_attr , """BAR""" )
self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Any:
"""simple docstring"""
with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
"""datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
A = {f"""{i}""": i for i in range(UpperCAmelCase )}
A = map_nested(lambda UpperCAmelCase : x + 10 , UpperCAmelCase , num_proc=UpperCAmelCase , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
'''simple docstring'''
@require_tf
def A (self : Dict ):
import tensorflow as tf
from tensorflow.keras import layers
A = layers.Dense(2 )
def gen_random_output():
A = tf.random.uniform((1, 3) )
return model(_lowerCAmelCase ).numpy()
with temp_seed(42 , set_tensorflow=_lowerCAmelCase ):
A = gen_random_output()
with temp_seed(42 , set_tensorflow=_lowerCAmelCase ):
A = gen_random_output()
A = gen_random_output()
np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def A (self : Tuple ):
import torch
def gen_random_output():
A = torch.nn.Linear(3 , 2 )
A = torch.rand(1 , 3 )
return model(_lowerCAmelCase ).detach().numpy()
with temp_seed(42 , set_pytorch=_lowerCAmelCase ):
A = gen_random_output()
with temp_seed(42 , set_pytorch=_lowerCAmelCase ):
A = gen_random_output()
A = gen_random_output()
np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def A (self : str ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
A = gen_random_output()
with temp_seed(42 ):
A = gen_random_output()
A = gen_random_output()
np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" , [{}] )
def __a ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
A = NestedDataStructure(UpperCAmelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
A = NestedDataStructure(UpperCAmelCase ).flatten()
assert output == expected_output
def __a ( ) ->Optional[Any]:
"""simple docstring"""
A = A(x=1 , y="""foobar""" )
A = {"""x""": 1, """y""": """foobar"""}
assert asdict(UpperCAmelCase ) == expected_output
A = {"""a""": {"""b""": A(x=10 , y="""foo""" )}, """c""": [A(x=20 , y="""bar""" )]}
A = {"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]}
assert asdict(UpperCAmelCase ) == expected_output
with pytest.raises(UpperCAmelCase ):
asdict([1, A(x=10 , y="""foo""" )] )
def __a ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
return text.split()
def __a ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def __a ( ) ->Optional[int]:
"""simple docstring"""
with Pool(2 ) as pool:
A = list(iflatmap_unordered(UpperCAmelCase , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(UpperCAmelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
A = list(iflatmap_unordered(UpperCAmelCase , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(UpperCAmelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
A = []
for yield_time, content in iflatmap_unordered(
UpperCAmelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(UpperCAmelCase )
assert out.count("""a""" ) == 2
assert out.count("""b""" ) == 2
assert len(UpperCAmelCase ) == 4
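# Conceptual sketch (added illustration, not the datasets implementation) of the
# map_nested behaviour exercised above: apply a function to every leaf of nested
# dicts and lists while preserving the container structure.
def _map_nested_sketch(fn, data):
    if isinstance(data, dict):
        return {k: _map_nested_sketch(fn, v) for k, v in data.items()}
    if isinstance(data, list):
        return [_map_nested_sketch(fn, v) for v in data]
    return fn(data)


assert _map_nested_sketch(lambda x: x + 1, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}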
| 258 | 1 |
"""simple docstring"""
def __lowercase ( a__ , a__ ) -> Tuple:
while second != 0:
__SCREAMING_SNAKE_CASE = first & second
first ^= second
__SCREAMING_SNAKE_CASE = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input('''Enter the first number: ''').strip())
    second = int(input('''Enter the second number: ''').strip())
    print(F'''{add(first, second) = }''')
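    # Hand-computed trace of add(5, 3): 101^011=110 carry 010; 110^010=100 carry 100;
    # 100^100=000 carry 1000; 000^1000=1000 -> loop ends with 8.
    assert add(5, 3) == 8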
| 366 |
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # each slot holds a deque, so colliding values chain at the same index
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
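# Minimal illustration (added sketch) of the chaining idea above: values that
# collide at one slot live in a deque, newest at the front, matching the
# appendleft call in _set_value.
_slot = deque()
_slot.appendleft("first")
_slot.appendleft("second")
assert list(_slot) == ["second", "first"]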
| 118 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    '''simple docstring'''
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=["stage1", "stage2", "stage3", "stage4"], )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, )
    return config
def create_rename_keys(config):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    '''simple docstring'''
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""")
            in_proj_bias = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[:dim]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[
                -dim:, :
            ]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim:]
            # fmt: on
def reverse_correct_unfold_reduction_order(x):
    '''simple docstring'''
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_reduction_order(x):
    '''simple docstring'''
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    '''simple docstring'''
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def correct_unfold_norm_order(x):
    '''simple docstring'''
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
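# Numeric sanity check (added illustration, hand-computed) of the reverse
# reduction reorder on a tiny 1x8 tensor: reshape(1, 4, 2) groups
# [0,1],[2,3],[4,5],[6,7]; row order [0, 2, 1, 3] then transpose+reshape
# interleaves the halves.
_x = torch.arange(8).reshape(1, 8)
assert reverse_correct_unfold_reduction_order(_x).tolist() == [[0, 4, 2, 6, 1, 5, 3, 7]]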
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    '''simple docstring'''
    model_name_to_url = {
"upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
"upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
"upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
"upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]
    for name, param in state_dict.items():
        print(name, param.shape)
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"""Pushing model and processor for {model_name} to hub""")
        model.push_to_hub(f"""openmmlab/{model_name}""")
        processor.push_to_hub(f"""openmmlab/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[f'''upernet-swin-{size}''' for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
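    # Example invocation (hypothetical script name and output path, added for
    # illustration; adjust to your local file names):
    #   python convert_swin_upernet_to_pytorch.py --model_name upernet-swin-tiny \
    #       --pytorch_dump_folder_path ./upernet-swin-tiny --push_to_hub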
| 283 |
def solution(n=4000000):
    '''Return the sum of the even-valued Fibonacci terms not exceeding n.'''
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f'''{solution() = }''')
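    # Sanity check (hand-computed): even Fibonacci terms not exceeding 10 are 2 and 8.
    assert solution(10) == 10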
| 283 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self , vocab_size=50400 , n_positions=2048 , n_embd=4096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , tie_word_embeddings=False , **kwargs , ) -> Any:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class GPTJOnnxConfig(OnnxConfigWithPast):
    '''simple docstring'''

    def __init__( self , config: PretrainedConfig , task: str = "default" , patching_specs: List[PatchingSpec] = None , use_past: bool = False , ) -> Optional[Any]:
        """simple docstring"""
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , """pad_token_id""" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs

    @property
    def num_layers( self ) -> int:
        """simple docstring"""
        return self._config.n_layer

    @property
    def num_attention_heads( self ) -> int:
        """simple docstring"""
        return self._config.n_head

    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """simple docstring"""
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch

                batch, seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs

    @property
    def default_onnx_opset( self ) -> int:
        """simple docstring"""
        return 13
| 353 |
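# Shape sketch (added illustration with assumed toy values) for one past
# key/value pair as built in generate_dummy_inputs above: (batch, n_head,
# past_len, head_dim), where head_dim = n_embd // n_head (4096 // 16 = 256
# with the GPT-J defaults).
import torch as _torch_demo

_past_kv_pair = (_torch_demo.zeros(2, 16, 9, 256), _torch_demo.zeros(2, 16, 9, 256))
assert _past_kv_pair[0].shape == (2, 16, 9, 256)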
def abbr(a: str, b: str) -> bool:
    """
    Return True if string `a` can be turned into abbreviation `b` by upper-casing
    some of its lowercase letters and deleting all remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
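# Worked example (added illustration, hand-checked against the dp table):
# "daBcd" matches "ABC" by upper-casing 'a' and 'c' and deleting the trailing
# lowercase 'd'; "dBcd" fails because 'B' cannot consume the expected 'A'.
assert abbr("daBcd", "ABC") is True
assert abbr("dBcd", "ABC") is False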
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 306 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 94 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        """simple docstring"""
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """simple docstring"""
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                F"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ", FutureWarning, )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """simple docstring"""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
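# Hypothetical usage sketch (model name and audio input assumed, not verified
# here): audio features and text labels can be produced in one call, as wired up
# in __call__ above, with batch["labels"] holding the tokenized text.
# processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
# batch = processor(audio=raw_audio, sampling_rate=16000, text="HELLO", return_tensors="pt")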
| 133 | 0 |
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCAmelCase_ :
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , encoder_stride=2 , ) -> Any:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
def __magic_name__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Tuple ) -> str:
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __magic_name__ ( self : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : Dict ) -> Any:
'''simple docstring'''
A__ = DeiTModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
A__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Optional[int] , snake_case_ : int , snake_case_ : Dict , snake_case_ : str ) -> Tuple:
'''simple docstring'''
A__ = DeiTForMaskedImageModeling(config=snake_case_ )
model.to(snake_case_ )
model.eval()
A__ = model(snake_case_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A__ = 1
A__ = DeiTForMaskedImageModeling(snake_case_ )
model.to(snake_case_ )
model.eval()
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ = model(snake_case_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __magic_name__ ( self : Any , snake_case_ : Optional[Any] , snake_case_ : List[Any] , snake_case_ : Optional[Any] ) -> Dict:
'''simple docstring'''
A__ = self.type_sequence_label_size
A__ = DeiTForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
A__ = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A__ = 1
A__ = DeiTForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self : Dict ) -> int:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
(
(
A__
), (
A__
), (
A__
),
) = config_and_inputs
A__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( A_, A_, unittest.TestCase ):
lowercase__ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowercase__ = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __magic_name__ ( self : str ) -> Tuple:
'''simple docstring'''
A__ = DeiTModelTester(self )
A__ = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def __magic_name__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def __magic_name__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
pass
def __magic_name__ ( self : Dict ) -> Dict:
'''simple docstring'''
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def __magic_name__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(snake_case_ )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __magic_name__ ( self : Any ) -> Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __magic_name__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case_ )
def __magic_name__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
def __magic_name__ ( self : Optional[Any] , snake_case_ : Any , snake_case_ : int , snake_case_ : str=False ) -> Dict:
'''simple docstring'''
A__ = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __magic_name__ ( self : str ) -> Tuple:
'''simple docstring'''
if not self.model_tester.is_training:
return
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(snake_case_ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
A__ = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
A__ = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
A__ = model(**snake_case_ ).loss
loss.backward()
def __magic_name__ ( self : Any ) -> str:
'''simple docstring'''
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A__ = False
A__ = True
for model_class in self.all_model_classes:
if model_class in get_values(snake_case_ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
A__ = model_class(snake_case_ )
model.gradient_checkpointing_enable()
model.to(snake_case_ )
model.train()
A__ = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
A__ = model(**snake_case_ ).loss
loss.backward()
def __magic_name__ ( self : str ) -> Optional[int]:
'''simple docstring'''
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(snake_case_ ),
*get_values(snake_case_ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
A__ = problem_type["title"]
A__ = problem_type["num_labels"]
A__ = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
A__ = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
if problem_type["num_labels"] > 1:
A__ = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
A__ = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=snake_case_ ) as warning_list:
A__ = model(**snake_case_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
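    # For reference, the three problem types exercised above map to different losses in the
    # classification head: "regression" -> MSELoss (float targets),
    # "single_label_classification" -> CrossEntropyLoss (long targets), and
    # "multi_label_classification" -> BCEWithLogitsLoss (float multi-hot targets).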
@slow
def __magic_name__ ( self : Tuple ) -> str:
'''simple docstring'''
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = DeiTModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _SCREAMING_SNAKE_CASE ( ) -> Tuple:
A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : List[str] ) -> str:
'''simple docstring'''
A__ = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
snake_case_ )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=snake_case_ , return_tensors="pt" ).to(snake_case_ )
# forward pass
with torch.no_grad():
A__ = model(**snake_case_ )
# verify the logits
A__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case_ )
A__ = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __magic_name__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
A__ = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=snake_case_ , return_tensors="pt" )
A__ = inputs.pixel_values.to(snake_case_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A__ = model(snake_case_ )
| 370 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
SCREAMING_SNAKE_CASE = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class UpperCAmelCase_ ( A_ ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = ['''input_ids''', '''attention_mask''']
lowercase__ = NllbTokenizer
lowercase__ = []
lowercase__ = []
def __init__( self : int , snake_case_ : int=None , snake_case_ : Any=None , snake_case_ : int="<s>" , snake_case_ : List[Any]="</s>" , snake_case_ : Optional[int]="</s>" , snake_case_ : int="<s>" , snake_case_ : str="<unk>" , snake_case_ : str="<pad>" , snake_case_ : Optional[int]="<mask>" , snake_case_ : str=None , snake_case_ : List[Any]=None , snake_case_ : Tuple=None , snake_case_ : Optional[int]=False , **snake_case_ : List[str] , ) -> Tuple:
'''simple docstring'''
A__ = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
A__ = legacy_behaviour
super().__init__(
vocab_file=snake_case_ , tokenizer_file=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , src_lang=snake_case_ , tgt_lang=snake_case_ , additional_special_tokens=snake_case_ , legacy_behaviour=snake_case_ , **snake_case_ , )
A__ = vocab_file
A__ = False if not self.vocab_file else True
A__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
A__ = {
lang_code: self.convert_tokens_to_ids(snake_case_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
A__ = src_lang if src_lang is not None else "eng_Latn"
A__ = self.convert_tokens_to_ids(self._src_lang )
A__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __magic_name__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __magic_name__ ( self : Optional[int] , snake_case_ : str ) -> None:
'''simple docstring'''
A__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __magic_name__ ( self : Optional[int] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __magic_name__ ( self : Any , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ ( self : int , snake_case_ : Tuple , snake_case_ : str , snake_case_ : Optional[str] , snake_case_ : Optional[str] , **snake_case_ : Tuple ) -> List[Any]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
A__ = src_lang
A__ = self(snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , **snake_case_ )
A__ = self.convert_tokens_to_ids(snake_case_ )
A__ = tgt_lang_id
return inputs
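    # The two renamed assignments above correspond, in the original NLLB implementation, to
    # `self.src_lang = src_lang` and `inputs["forced_bos_token_id"] = tgt_lang_id`, so
    # generation is forced to start with the target-language code token.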
def __magic_name__ ( self : int , snake_case_ : List[str] , snake_case_ : str = "eng_Latn" , snake_case_ : Optional[List[str]] = None , snake_case_ : str = "fra_Latn" , **snake_case_ : Dict , ) -> BatchEncoding:
'''simple docstring'''
A__ = src_lang
A__ = tgt_lang
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def __magic_name__ ( self : Tuple ) -> Dict:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __magic_name__ ( self : List[Any] , snake_case_ : Dict ) -> None:
'''simple docstring'''
A__ = self.convert_tokens_to_ids(snake_case_ )
if self.legacy_behaviour:
A__ = []
A__ = [self.eos_token_id, self.cur_lang_code]
else:
A__ = [self.cur_lang_code]
A__ = [self.eos_token_id]
A__ = self.convert_ids_to_tokens(self.prefix_tokens )
A__ = self.convert_ids_to_tokens(self.suffix_tokens )
A__ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
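    # A concrete sketch of the resulting layout (assuming src_lang="eng_Latn"):
    #   legacy_behaviour=True:  tokens ... </s> eng_Latn   (prefix empty, suffix = [eos, lang])
    #   legacy_behaviour=False: eng_Latn tokens ... </s>   (prefix = [lang], suffix = [eos])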
def __magic_name__ ( self : List[Any] , snake_case_ : str ) -> None:
'''simple docstring'''
A__ = self.convert_tokens_to_ids(snake_case_ )
if self.legacy_behaviour:
A__ = []
A__ = [self.eos_token_id, self.cur_lang_code]
else:
A__ = [self.cur_lang_code]
A__ = [self.eos_token_id]
A__ = self.convert_ids_to_tokens(self.prefix_tokens )
A__ = self.convert_ids_to_tokens(self.suffix_tokens )
A__ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __magic_name__ ( self : List[str] , snake_case_ : str , snake_case_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(snake_case_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
return
A__ = os.path.join(
snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ):
copyfile(self.vocab_file , snake_case_ )
return (out_vocab_file,)
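# A minimal usage sketch for the fast tokenizer above (the class name is hidden by the
# renaming; NllbTokenizerFast and the checkpoint id are assumptions based on the vocab maps):
#   tok = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M",
#                                           src_lang="eng_Latn", tgt_lang="fra_Latn")
#   batch = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")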
| 230 | 0 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase__( UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : int )->int:
# Initialise PyTorch model
A__ = BertConfig.from_json_file(UpperCamelCase__ )
print(f"Building PyTorch model from configuration: {config}" )
A__ = BertForPreTraining(UpperCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_bert(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
a__: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a__: Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
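    # Example invocation (the script name and all paths below are placeholders):
    #   python convert_bert_tf_checkpoint.py \
    #       --tf_checkpoint_path ./bert_model.ckpt \
    #       --bert_config_file ./bert_config.json \
    #       --pytorch_dump_path ./pytorch_model.bin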
| 193 |
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''keras_nlp''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''keras_nlp'''] )
| 193 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __A( a ):
@slow
@require_torch
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
__a = BertTokenizer.from_pretrained('''bert-base-uncased''' )
__a = bertabert.config.encoder.vocab_size
__a = tokenizer.sep_token_id
__a = tokenizer.cls_token_id
__a = 128
__a = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
__a = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
__a = train_dataset.select(range(32 ) )
__a = val_dataset.select(range(16 ) )
__a = 4
def _map_to_encoder_decoder_inputs(_snake_case ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__a = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=_snake_case , max_length=512 )
__a = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=_snake_case , max_length=128 )
__a = inputs.input_ids
__a = inputs.attention_mask
__a = outputs.input_ids
__a = outputs.input_ids.copy()
__a = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
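            # -100 is the ignore_index of torch.nn.CrossEntropyLoss, so padded label
            # positions are excluded from the loss computation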
__a = outputs.attention_mask
assert all(len(_snake_case ) == 512 for x in inputs.input_ids )
assert all(len(_snake_case ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_snake_case ):
__a = pred.label_ids
__a = pred.predictions
# all unnecessary tokens are removed
__a = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
__a = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
__a = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_snake_case ) )] ) / len(_snake_case )
return {"accuracy": accuracy}
# map train dataset
__a = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
__a = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
__a = self.get_auto_remove_tmp_dir()
__a = SeqaSeqTrainingArguments(
output_dir=_snake_case , per_device_train_batch_size=_snake_case , per_device_eval_batch_size=_snake_case , predict_with_generate=_snake_case , evaluation_strategy='''steps''' , do_train=_snake_case , do_eval=_snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__a = SeqaSeqTrainer(
model=_snake_case , args=_snake_case , compute_metrics=_compute_metrics , train_dataset=_snake_case , eval_dataset=_snake_case , tokenizer=_snake_case , )
# start training
trainer.train() | 33 |
from __future__ import annotations
def __lowerCAmelCase ( a__ , a__ = None ) -> list[list[str]]:
__a = word_bank or []
# create a table
__a = len(a__ ) + 1
__a = []
for _ in range(a__ ):
table.append([] )
# seed value
__a = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(a__ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(a__ )] == word:
__a = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
table[i + len(a__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(a__ )]:
combination.reverse()
return table[len(a__ )]
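# Complexity sketch: row i of the table holds every way to build target[:i], so the loops
# perform O(len(target) * len(word_bank)) slice comparisons; the output itself can still be
# exponential in size, because every combination is materialized.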
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
) | 33 | 1 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
A__ : Any ={
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_28,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class UpperCAmelCase ( unittest.TestCase ):
@classmethod
def lowercase__ ( cls : Optional[Any] ) -> Tuple:
_lowerCAmelCase = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowercase__ ( cls : Optional[int] ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
def lowercase__ ( self : int ) -> Union[str, Any]:
_lowerCAmelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("""test-config""" , use_auth_token=self._token )
_lowerCAmelCase = BertConfig.from_pretrained(f"{USER}/test-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__snake_case , repo_id="""test-config""" , push_to_hub=__snake_case , use_auth_token=self._token )
_lowerCAmelCase = BertConfig.from_pretrained(f"{USER}/test-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
_lowerCAmelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
_lowerCAmelCase = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__snake_case , repo_id="""valid_org/test-config-org""" , push_to_hub=__snake_case , use_auth_token=self._token )
_lowerCAmelCase = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowercase__ ( self : Dict ) -> Any:
CustomConfig.register_for_auto_class()
_lowerCAmelCase = CustomConfig(attribute=42 )
config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
_lowerCAmelCase = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config" , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
self.assertEqual(new_config.attribute , 42 )
class UpperCAmelCase ( unittest.TestCase ):
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
_lowerCAmelCase = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_lowerCAmelCase = c.n_embd + 1 # int
_lowerCAmelCase = c.resid_pdrop + 1.0 # float
_lowerCAmelCase = not c.scale_attn_weights # bool
_lowerCAmelCase = c.summary_type + """foo""" # str
c.update_from_string(
f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}" )
self.assertEqual(__snake_case , c.n_embd , """mismatch for key: n_embd""" )
self.assertEqual(__snake_case , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
self.assertEqual(__snake_case , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
self.assertEqual(__snake_case , c.summary_type , """mismatch for key: summary_type""" )
def lowercase__ ( self : Optional[Any] ) -> Any:
_lowerCAmelCase = PretrainedConfig()
_lowerCAmelCase = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__snake_case , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
_lowerCAmelCase = [key for key, value in config_common_kwargs.items() if value == getattr(__snake_case , __snake_case )]
if len(__snake_case ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
f" {', '.join(__snake_case )}." )
def lowercase__ ( self : List[str] ) -> List[Any]:
with self.assertRaises(__snake_case ):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCAmelCase = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
_lowerCAmelCase = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
self.assertIsNotNone(__snake_case )
def lowercase__ ( self : Optional[Any] ) -> str:
# A mock response for an HTTP head request to emulate server down
_lowerCAmelCase = mock.Mock()
_lowerCAmelCase = 5_00
_lowerCAmelCase = {}
_lowerCAmelCase = HTTPError
_lowerCAmelCase = {}
# Download this model to make sure it's in the cache.
_lowerCAmelCase = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__snake_case ) as mock_head:
_lowerCAmelCase = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def lowercase__ ( self : str ) -> Union[str, Any]:
# This test is for deprecated behavior and can be removed in v5
_lowerCAmelCase = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def lowercase__ ( self : str ) -> Union[str, Any]:
_lowerCAmelCase = AutoConfig.from_pretrained("""bert-base-cased""" )
_lowerCAmelCase = ["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__snake_case )
_lowerCAmelCase = 2
json.dump(configuration.to_dict() , open(os.path.join(__snake_case , """config.4.0.0.json""" ) , """w""" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_lowerCAmelCase = AutoConfig.from_pretrained(__snake_case )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_lowerCAmelCase = ["""config.42.0.0.json"""]
_lowerCAmelCase = 7_68
configuration.save_pretrained(__snake_case )
shutil.move(os.path.join(__snake_case , """config.4.0.0.json""" ) , os.path.join(__snake_case , """config.42.0.0.json""" ) )
_lowerCAmelCase = AutoConfig.from_pretrained(__snake_case )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_lowerCAmelCase = """hf-internal-testing/test-two-configs"""
import transformers as new_transformers
_lowerCAmelCase = """v4.0.0"""
_lowerCAmelCase , _lowerCAmelCase = new_transformers.models.auto.AutoConfig.from_pretrained(
__snake_case , return_unused_kwargs=__snake_case )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__snake_case , {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
_lowerCAmelCase = """v3.0.0"""
_lowerCAmelCase = old_transformers.models.auto.AutoConfig.from_pretrained(__snake_case )
self.assertEqual(old_configuration.hidden_size , 7_68 )
| 70 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
lowerCamelCase_ = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ["DPTFeatureExtractor"]
lowerCamelCase_ = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 191 | 0 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = (DDPMScheduler,)
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ : Dict = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def a__ ( self ) -> List[str]:
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Tuple:
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] ,[0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE ,beta_end=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Union[str, Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Any:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Tuple:
self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_SCREAMING_SNAKE_CASE ,prediction_type=_SCREAMING_SNAKE_CASE ,sample_max_value=_SCREAMING_SNAKE_CASE ,)
def a__ ( self ) -> Tuple:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Tuple:
for t in [0, 500, 999]:
self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> int:
UpperCAmelCase_ : Any = self.scheduler_classes[0]
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Optional[int] = scheduler_class(**_SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
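        # The "fixed_small" values asserted above follow the DDPM posterior variance
        #   var(t) = beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t),
        # which is ~0 at t = 0 and approaches beta_t as t grows.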
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase_ : List[str] = self.get_scheduler_config()
UpperCAmelCase_ : List[Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = self.dummy_model()
UpperCAmelCase_ : Dict = self.dummy_sample_deter
UpperCAmelCase_ : int = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
UpperCAmelCase_ : Any = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
UpperCAmelCase_ : int = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCAmelCase_ : str = pred_prev_sample
UpperCAmelCase_ : Any = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : Any = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_58.96_06 ) < 1e-2
assert abs(result_mean.item() - 0.33_72 ) < 1e-3
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : Dict = self.scheduler_classes[0]
UpperCAmelCase_ : List[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
UpperCAmelCase_ : int = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = self.dummy_model()
UpperCAmelCase_ : Optional[int] = self.dummy_sample_deter
UpperCAmelCase_ : int = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
UpperCAmelCase_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
UpperCAmelCase_ : Dict = scheduler.step(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,generator=_SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCAmelCase_ : str = pred_prev_sample
UpperCAmelCase_ : Any = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : Dict = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_02.02_96 ) < 1e-2
assert abs(result_mean.item() - 0.26_31 ) < 1e-3
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = self.scheduler_classes[0]
UpperCAmelCase_ : List[str] = self.get_scheduler_config()
UpperCAmelCase_ : Optional[int] = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = scheduler.timesteps
for i, timestep in enumerate(_SCREAMING_SNAKE_CASE ):
if i == len(_SCREAMING_SNAKE_CASE ) - 1:
UpperCAmelCase_ : str = -1
else:
UpperCAmelCase_ : Tuple = timesteps[i + 1]
UpperCAmelCase_ : Union[str, Any] = scheduler.previous_timestep(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = prev_t.item()
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Any:
UpperCAmelCase_ : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase_ : Tuple = self.get_scheduler_config()
UpperCAmelCase_ : Optional[int] = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = [100, 87, 50, 51, 0]
with self.assertRaises(_SCREAMING_SNAKE_CASE ,msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Dict = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = [100, 87, 50, 1, 0]
UpperCAmelCase_ : List[Any] = len(_SCREAMING_SNAKE_CASE )
with self.assertRaises(_SCREAMING_SNAKE_CASE ,msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_SCREAMING_SNAKE_CASE ,timesteps=_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.scheduler_classes[0]
UpperCAmelCase_ : Any = self.get_scheduler_config()
UpperCAmelCase_ : List[Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _SCREAMING_SNAKE_CASE ,msg=f'''`timesteps` must start before `self.config.num_train_timesteps`: {scheduler.config.num_train_timesteps}''' ,):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE ) | 235 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__a = logging.getLogger()
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
parser.add_argument('''-f''' )
UpperCAmelCase_ : Dict = parser.parse_args()
return args.f
class __a( _a ):
"""simple docstring"""
def a__ ( self ) -> None:
UpperCAmelCase_ : int = logging.StreamHandler(sys.stdout )
logger.addHandler(_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ : int = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 ,'''run_glue_deebert.py''' )
with patch.object(_SCREAMING_SNAKE_CASE ,'''argv''' ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(_SCREAMING_SNAKE_CASE ,0.6_66 )
@slow
@require_torch_non_multi_gpu
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : List[Any] = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
self.run_and_check(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(_SCREAMING_SNAKE_CASE ) | 235 | 1 |
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
_snake_case : Optional[int] = []
_snake_case : List[Any] = set({"""(""", """[""", """{"""} )
_snake_case : str = set({""")""", """]""", """}"""} )
_snake_case : str = {"""{""": """}""", """[""": """]""", """(""": """)"""}
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(SCREAMING_SNAKE_CASE__ ) == 0 or (len(SCREAMING_SNAKE_CASE__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(SCREAMING_SNAKE_CASE__ ) == 0
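# A couple of worked examples (characters outside the bracket sets are ignored):
#   is_balanced("([]{})")  -> True
#   is_balanced("([)]")    -> False  (')' tries to close '[')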
def lowercase ( ) -> Optional[Any]:
_snake_case : int = input("""Enter sequence of brackets: """ )
if is_balanced(SCREAMING_SNAKE_CASE__ ):
print(SCREAMING_SNAKE_CASE__ , """is balanced""" )
else:
print(SCREAMING_SNAKE_CASE__ , """is not balanced""" )
if __name__ == "__main__":
main()
| 317 |
def lowercase ( SCREAMING_SNAKE_CASE__ : str ) -> list:
_snake_case : Optional[Any] = [0] * len(SCREAMING_SNAKE_CASE__ )
for i in range(1 , len(SCREAMING_SNAKE_CASE__ ) ):
# use last results for better performance - dynamic programming
_snake_case : Optional[Any] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
_snake_case : List[Any] = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
_snake_case : Optional[int] = j
return prefix_result
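# A small worked example of the prefix (failure) function:
#   prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
# so the longest proper prefix that is also a suffix has length 4 ("aabc").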
def lowercase ( SCREAMING_SNAKE_CASE__ : str ) -> int:
return max(prefix_function(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 317 | 1 |
import heapq
def A (__A : dict ) -> set[int]:
"""simple docstring"""
UpperCAmelCase_ = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(__A , [-1 * len(__A ), (key, value)] )
# chosen_vertices = set of chosen vertices
UpperCAmelCase_ = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
UpperCAmelCase_ = heapq.heappop(__A )[1][0]
chosen_vertices.add(__A )
# Remove all arcs adjacent to argmax
for elem in queue:
        # if v has no adjacent nodes, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
        # remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
UpperCAmelCase_ = elem[1][1].index(__A )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(__A )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case_ : Optional[int] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
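    # For the sample graph above this prints the cover {0, 1, 2, 4}
    # (set display order may vary).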
| 7 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[int] , _snake_case : Union[str, Any]):
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']):
UpperCAmelCase_ = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''sgugger/tiny-distilbert-classification'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , only_pretrain_model=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , torchscript=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == '''cpu''' , '''Can't do half precision''')
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , fpaa=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
# set architectures equal to `None`
UpperCAmelCase_ = None
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , save_to_csv=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_snake_case , '''inf_time.csv''') , train_memory_csv_file=os.path.join(_snake_case , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(_snake_case , '''inf_mem.csv''') , train_time_csv_file=os.path.join(_snake_case , '''train_time.csv''') , env_info_csv_file=os.path.join(_snake_case , '''env.csv''') , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
benchmark.run()
self.assertTrue(Path(os.path.join(_snake_case , '''inf_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''inf_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''env.csv''')).exists())
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_snake_case : Tuple):
self.assertTrue(hasattr(_snake_case , '''sequential'''))
self.assertTrue(hasattr(_snake_case , '''cumulative'''))
self.assertTrue(hasattr(_snake_case , '''current'''))
self.assertTrue(hasattr(_snake_case , '''total'''))
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_snake_case , '''log.txt''') , log_print=_snake_case , trace_memory_line_by_line=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(_snake_case , '''log.txt''')).exists())
| 7 | 1 |
# flake8: noqa
# Lint as: python3
lowerCAmelCase__ :int = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 329 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[Any] = 1
_lowercase : Any = 3
_lowercase : Tuple = (32, 32)
_lowercase : Tuple = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(lowerCamelCase)
return image
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Dict = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
return model
@property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : str = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
return model
@property
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Optional[int] = RobertaSeriesConfig(
hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=50_06, )
return RobertaSeriesModelWithTransformation(lowerCamelCase)
@property
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
def extract(*lowerCamelCase, **lowerCamelCase):
class _lowerCamelCase:
def __init__( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = torch.ones([0])
def UpperCamelCase ( self, lowerCamelCase) -> int:
"""simple docstring"""
self.pixel_values.to(lowerCamelCase)
return self
return Out()
return extract
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : List[Any] = self.dummy_cond_unet
_lowercase : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase)
_lowercase : Optional[Any] = self.dummy_vae
_lowercase : List[Any] = self.dummy_text_encoder
_lowercase : Any = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
_lowercase : Tuple = 77
_lowercase : int = self.dummy_image.to(lowerCamelCase)
_lowercase : int = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_lowercase : Union[str, Any] = AltDiffusionImgaImgPipeline(
unet=lowerCamelCase, scheduler=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=self.dummy_extractor, )
_lowercase : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=lowerCamelCase)
_lowercase : Optional[int] = alt_pipe.to(lowerCamelCase)
alt_pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[Any] = 'A painting of a squirrel eating a burger'
_lowercase : Dict = torch.Generator(device=lowerCamelCase).manual_seed(0)
_lowercase : Any = alt_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='np', image=lowerCamelCase, )
_lowercase : Optional[int] = output.images
_lowercase : Optional[Any] = torch.Generator(device=lowerCamelCase).manual_seed(0)
_lowercase : Optional[Any] = alt_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='np', image=lowerCamelCase, return_dict=lowerCamelCase, )[0]
_lowercase : Optional[int] = image[0, -3:, -3:, -1]
_lowercase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5E-3
    @unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type='np', image=init_image, ).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg')
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = 'BAAI/AltDiffusion'
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = 'A fantasy landscape, trending on artstation'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type='np', )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg')
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy')

        model_id = 'BAAI/AltDiffusion'
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = 'A fantasy landscape, trending on artstation'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type='np', )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 21 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ['gpt2']
TINY_MODEL_CHECKPOINT = 'gpt2'
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name='text'),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized['input_ids'].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)['logits']
            return outputs
@require_tf
@require_keras_nlp
class TFGPT2TokenizerTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors='tf')
                tf_outputs = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / 'saved.model'
                tf.saved_model.save(model, save_path, signatures={'serving_default': model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures['serving_default'](test_inputs)['output_0']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))
    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # pad_token_id just has to be set for the test to run; any id works
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out['input_ids'].numpy().shape[1]
                assert out_length == max_length
| 369 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode an audio byte payload to a mono float32 waveform through ffmpeg."""
    ar = f'{sampling_rate}'
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
        'ffmpeg',
        '-i',
        'pipe:0',
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename') from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile')
    return audio
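

# Minimal usage sketch (editor's illustration; 'sample.mp3' is a hypothetical file):
# ffmpeg_read accepts the raw bytes of any container/codec ffmpeg can decode and
# returns a mono float32 waveform resampled to `sampling_rate`.
#
#     with open('sample.mp3', 'rb') as f:
#         audio = ffmpeg_read(f.read(), sampling_rate=16_000)
#     print(audio.dtype, audio.shape)  # float32, (num_samples,)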
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = 'f32le'):
    """Stream raw microphone bytes through ffmpeg, yielding buffers of `chunk_length_s` seconds."""
    ar = f'{sampling_rate}'
    ac = '1'
    if format_for_conversion == 's16le':
        size_of_sample = 2
    elif format_for_conversion == 'f32le':
        size_of_sample = 4
    else:
        raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`')

    system = platform.system()
    if system == 'Linux':
        format_ = 'alsa'
        input_ = 'default'
    elif system == 'Darwin':
        format_ = 'avfoundation'
        input_ = ':0'
    elif system == 'Windows':
        format_ = 'dshow'
        input_ = 'default'

    ffmpeg_command = [
        'ffmpeg',
        '-f',
        format_,
        '-i',
        input_,
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-fflags',
        'nobuffer',
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = 'f32le',
):
    """Yield overlapping microphone chunks; the 'stride' metadata describes the overlap in samples."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == 's16le':
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == 'f32le':
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`')

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item['raw'] = np.frombuffer(item['raw'], dtype=dtype)
        item['stride'] = (
            item['stride'][0] // size_of_sample,
            item['stride'][1] // size_of_sample,
        )
        item['sampling_rate'] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
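

# Usage sketch (editor's illustration): consuming live microphone chunks. Each item
# is a dict with 'raw' (numpy waveform), 'sampling_rate', 'stride' and 'partial';
# `transcribe` below is a hypothetical callable standing in for an ASR model.
#
#     for chunk in ffmpeg_microphone_live(sampling_rate=16_000, chunk_length_s=5.0, stream_chunk_s=1.0):
#         if not chunk['partial']:
#             transcribe(chunk['raw'])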
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Read raw bytes from `iterator` and yield chunks of size `chunk_len`, overlapping
    by `stride[0]` bytes on the left and `stride[1]` bytes on the right."""
    acc = b''
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}')
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {'raw': acc[:chunk_len], 'stride': stride, 'partial': True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right:]
    # Last chunk
    if len(acc) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
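

# Worked example (editor's illustration) of the striding above, using chunk_len=6
# and stride=(2, 2) over the bytes b'0123456789':
#
#     list(chunk_bytes_iter(iter([b'0123456789']), 6, stride=(2, 2)))
#     # [{'raw': b'012345', 'stride': (0, 2)},
#     #  {'raw': b'234567', 'stride': (2, 2)},
#     #  {'raw': b'456789', 'stride': (2, 2)},
#     #  {'raw': b'6789', 'stride': (2, 0)}]
#
# Dropping stride[0] bytes on the left and stride[1] on the right of each chunk
# reconstructs the stream exactly once: b'0123' + b'45' + b'67' + b'89'.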
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal helper: run ffmpeg and yield its stdout in `buflen`-sized reads."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b'':
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to stream audio files from filename') from error
| 292 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = 'roformer'

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ]
        )
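

# Usage sketch (editor's illustration): default config plus the ONNX input axes.
#
#     config = RoFormerConfig()
#     onnx_config = RoFormerOnnxConfig(config)
#     print(onnx_config.inputs)
#     # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', ...), ...])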
| 205 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
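
# Editor's note, a sketch of how the lazy module above behaves: importing the
# package only registers `_import_structure`; a submodule such as `modeling_xlnet`
# is loaded on first attribute access.
#
#     import transformers
#     model_cls = transformers.XLNetModel  # the heavy torch import happens here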
| 205 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (('num_inference_steps', 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1_000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
            'solver_type': 'bh2',
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, 'Scheduler outputs are not identical'
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, 'Scheduler outputs are not identical'
    def full_loop(self, scheduler=None, **config):
        # build a default scheduler only when none was passed in (the original
        # unconditional re-creation made the `scheduler` argument dead code)
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, 'set_timesteps'):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, 'set_timesteps'):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ['bh1', 'bh2']:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ['epsilon', 'sample']:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, solver_order=order, solver_type=solver_type, )

    def test_prediction_type(self):
        for prediction_type in ['epsilon', 'v_prediction']:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ['bh1', 'bh2']:
            for order in [1, 2, 3]:
                for prediction_type in ['epsilon', 'sample']:
                    self.check_over_configs(
                        solver_order=order, solver_type=solver_type, prediction_type=prediction_type, )
                    sample = self.full_loop(
                        solver_order=order, solver_type=solver_type, prediction_type=prediction_type, )
                    assert not torch.isnan(sample).any(), 'Samples have nan numbers'

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1014) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
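
# Usage sketch (editor's illustration): dropping UniPC into a diffusers pipeline via
# the same `from_config` round-trip the tests above exercise; the model id is an example.
#
#     from diffusers import DiffusionPipeline, UniPCMultistepScheduler
#     pipe = DiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
#     pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)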
| 43 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = 'sshleifer/student_marian_en_ro_6_1'
MBART_TINY = 'sshleifer/tiny-mbart'
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seqaseq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1, max_len=12, model_name=MBART_TINY, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate, do_train=do_train, do_eval=do_eval, do_predict=do_predict, )
        logs = TrainerState.load_from_json(os.path.join(output_dir, 'trainer_state.json')).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if 'eval_loss' in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert 'eval_bleu' in first_step_stats

        last_step_stats = eval_metrics[-1]
        assert isinstance(last_step_stats['eval_bleu'], float)
        assert not math.isnan(float(last_step_stats['eval_loss'])), 'eval_loss must not be `nan`'
    @require_torch_non_multi_gpu
    def test_run_seqaseq_no_dist(self):
        self.run_seqaseq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seqaseq_dp(self):
        self.run_seqaseq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seqaseq_ddp(self):
        self.run_seqaseq_quick(distributed=True)

    @unittest.skip('Requires an update of the env running those tests')
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_sharded_ddp(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str='--sharded_ddp simple')

    @unittest.skip('Requires an update of the env running those tests')
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str='--sharded_ddp simple --fp16')

    @unittest.skip('Requires an update of the env running those tests')
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_fully_sharded_ddp(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str='--sharded_ddp zero_dp_2', predict_with_generate=False)

    @unittest.skip('Requires an update of the env running those tests')
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_fully_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(
            distributed=True, extra_args_str='--sharded_ddp zero_dp_2 --fp16', predict_with_generate=False)

    @require_apex
    @require_torch_gpu
    def test_run_seqaseq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seqaseq_quick(distributed=True, extra_args_str='--fp16 --fp16_backend=apex')

        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True, extra_args_str='--fp16 --fp16_backend=apex')
    @parameterized.expand(['base', 'low', 'high', 'mixed'])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            'base': {'extra_args_str': '', 'n_matches': 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
        }

        data = experiments[experiment_id]
        kwargs = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
        log_info_string = 'Running training'
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data['extra_args_str'])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data['n_matches'])
    @slow
    def test_run_seqaseq(self):
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10, distributed=False, )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, 'trainer_state.json')).log_history
        eval_metrics = [log for log in logs if 'eval_loss' in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats['eval_loss'] > last_step_stats['eval_loss'], 'model learned nothing'
        assert isinstance(last_step_stats['eval_bleu'], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert 'generated_predictions.txt' in contents
        assert 'predict_results.json' in contents
    @slow
    @require_bitsandbytes
    def test_run_seqaseq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = '--skip_memory_metrics 0'

            output_dir = self.run_trainer(
                max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=1, optim=optim, distributed=True, extra_args_str=extra_args, do_eval=False, do_predict=False, n_gpus_to_use=1, )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, 'trainer_state.json')).log_history
            gpu_peak_mem_mb = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20)
            loss = logs[0]['train_loss']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
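        # Sanity arithmetic for the ~150MB figure above (editor's note):
        # 25e6 quantizable params * (8 - 2) bytes saved per param = 150e6 bytes,
        # i.e. 150e6 / 2**20 ≈ 143 MiB, so the 120MB threshold leaves ~20MB of slack.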
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
            f' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'
            f' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB', )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
            f' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'
            f' gpu_total_mem_bnb={gpu_total_mem_bnb}MB', )

        self.assertEqual(
            loss_orig, loss_bnb, f'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}')
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = 'adafactor',
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f'''
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        '''.split()

        args_eval = f'''
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        '''.split()
        args_predict = '''
            --do_predict
        '''.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += '--predict_with_generate'.split()

        if do_train:
            if optim == 'adafactor':
                args += '--adafactor'.split()
            else:
                args += f'--optim {optim}'.split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f'''
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            '''.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ['run_translation.py'] + args
            with patch.object(sys, 'argv', testargs):
                main()

        return output_dir
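
    # Equivalent standalone launch of what run_trainer assembles above (editor's
    # illustration; the paths and values are examples, not taken from the test):
    #
    #   python -m torch.distributed.run --nproc_per_node=2 --master_port=29500 \
    #       examples/pytorch/translation/run_translation.py \
    #       --model_name_or_path sshleifer/tiny-mbart --do_train --output_dir /tmp/out ...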
| 43 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = 'albert'

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act='gelu_new',
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type='absolute',
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ]
        )
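

# Usage sketch (editor's illustration) of driving an ONNX export with the config
# above; `transformers.onnx.export` is the legacy export entry point and the model
# id is just an example.
#
#     from pathlib import Path
#     from transformers import AlbertModel, AlbertTokenizer
#     from transformers.onnx import export
#
#     model = AlbertModel.from_pretrained('albert-base-v2')
#     tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
#     onnx_config = AlbertOnnxConfig(model.config)
#     export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path('albert.onnx'))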
| 79 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = 'main'
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = 'aaaaaaa'
# This commit does not exist, so we should 404.

PINNED_SHA1 = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
@contextlib.contextmanager
def context_en():
    print('Welcome!')
    yield
    print('Bye!')


@contextlib.contextmanager
def context_fr():
    print('Bonjour!')
    yield
    print('Au revoir!')
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec('transformers') is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print('Transformers are awesome!')
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), 'Transformers are awesome!\n')

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), 'Welcome!\nTransformers are awesome!\nBye!\n')

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n')

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ['labels'])
        self.assertEqual(find_labels(BertForPreTraining), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(BertForQuestionAnswering), ['start_positions', 'end_positions'])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ['labels'])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ['labels'])
        self.assertEqual(find_labels(TFBertForPreTraining), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ['start_positions', 'end_positions'])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ['labels'])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 118 | 0 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
_DESCRIPTION = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
_KWARGS_DESCRIPTION = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        'accuracy': acc,
        'f1': f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, 'cosine')
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
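

# Worked toy example (editor's illustration): with the two sets of sentence vectors
# identical, each English vector retrieves its own index among the top 10, so
# precision@10 is 1.0.
#
#     en = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
#     print(precision_at_10(en, en))  # 1.0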
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
'references': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' if self.config_name != 'cvit-mkb-clsr' else None , )
    def _compute(self, predictions, references):
        if self.config_name == 'cvit-mkb-clsr':
            return {'precision@10': precision_at_10(predictions, references)}
        elif self.config_name in ['wiki-ner']:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
            return {'accuracy': simple_accuracy(predictions, references)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
| 197 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
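
# Usage sketch (editor's illustration; the data path and filename are hypothetical):
#
#     processor = SquadV1Processor()
#     examples = processor.get_train_examples('data/squad', filename='train-v1.1.json')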
| 197 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_efficientnet'] = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 4 |
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaVaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFDebertaVaModel(config=config)
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        model = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="""Model not available yet""")
    def test_inference_masked_lm(self):
        '''simple docstring'''
        pass

    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""")
        input_ids = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]])
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1E-4)
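    # To run only this module's tests (assumed repository layout, for illustration):
    #     python -m pytest tests/models/deberta_v2/test_modeling_tf_deberta_v2.py -k Integration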
| 306 | 0 |
def depth_first_search(grid, row, col, visit):
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
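    # Quick sanity check (encoding as in the function above: 0 = open cell,
    # 1 = blocked; paths run from the top-left to the bottom-right corner
    # without revisiting a cell):
    maze = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    print(depth_first_search(maze, 0, 0, set()))  # 2 simple paths around the obstacle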
| 355 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """nielsr/canine-s""": 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, bos_token=chr(CLS), eos_token=chr(SEP), sep_token=chr(SEP), cls_token=chr(CLS), pad_token=chr(PAD), mask_token=chr(MASK), add_prefix_space=False, model_max_length=2048, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, model_max_length=model_max_length, **kwargs, )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(F"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(F"invalid id: {index}")

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to save: every Unicode codepoint is its own token.
        return ()
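    # Usage sketch (assumption: the class above is wired into transformers as
    # the CANINE tokenizer). Ordinary characters map straight to their Unicode
    # codepoints, with CLS/SEP drawn from the Private Use Area:
    #     tok = CanineTokenizer()
    #     tok("hi")["input_ids"]  # -> [57344, 104, 105, 57345]  (CLS, 'h', 'i', SEP)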
| 127 | 0 |
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.process_group = None
    def init_retrieval(self, distributed_port: int):
        logger.info("""initializing retrieval""")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("""dist initialized""")
            # needs to be set manually
            os.environ["""GLOO_SOCKET_IFNAME"""] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["""MASTER_PORT"""] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="""gloo""")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("""dist not initialized / main""")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)
    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_as_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_as_list, group=self.process_group)
        return target_tensor
    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("""e""")), None)
        return ifname
    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
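    # For reference, a minimal sketch (assumption: `_chunk_tensor` is inherited
    # from the base retriever and not shown in this fragment) of the helper used
    # above to split the gathered batch back into one chunk per worker:
    #
    #     def _chunk_tensor(self, t, chunk_size):
    #         return [t[i : i + chunk_size] for i in range(0, len(t), chunk_size)]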
| 44 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''')
        self.assertListEqual(tokens, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = '''UNwant\u00E9d,running'''

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = '''UNwant\u00E9d,running'''

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz'''), ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? '''), ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''])
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo'''), ['''hello'''])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? '''), ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo'''), ['''h\u00E9llo'''])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? '''), ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo'''), ['''hello'''])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? '''), ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo'''), ['''hello'''])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? '''), ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? '''), ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? '''), ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=['''[UNK]'''])
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]'''), ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''])

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = '''a\n\'ll !!to?\'d of, can\'t.'''
        expected = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='''[UNK]''')

        self.assertListEqual(tokenizer.tokenize(''''''), [])
        self.assertListEqual(tokenizer.tokenize('''unwanted running'''), ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''])
        self.assertListEqual(tokenizer.tokenize('''unwantedX running'''), ['''[UNK]''', '''runn''', '''##ing'''])
def __lowerCamelCase ( self :List[Any] ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __lowerCamelCase ( self :Union[str, Any] ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __lowerCamelCase ( self :str ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['''Test''', '''\xad''', '''test''']], [['''[UNK]'''], [], ['''[UNK]''']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['''Test''', '''\xad''', '''test''']], [['''[UNK]'''], [], ['''[UNK]''']])

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('''bert-base-uncased''')

        text = tokenizer.encode('''sequence builders''', add_special_tokens=False)
        text_a = tokenizer.encode('''multi-sequence build''', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, '''do_lower_case''') else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''A'''),
                        ((1, 2), ''','''),
                        ((3, 5), '''na'''),
                        ((5, 6), '''##ï'''),
                        ((6, 8), '''##ve'''),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), '''Allen'''),
                        ((21, 23), '''##NL'''),
                        ((23, 24), '''##P'''),
                        ((25, 33), '''sentence'''),
                        ((33, 34), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''a'''),
                        ((1, 2), ''','''),
                        ((3, 8), '''naive'''),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), '''allen'''),
                        ((21, 23), '''##nl'''),
                        ((23, 24), '''##p'''),
                        ((25, 33), '''sentence'''),
                        ((33, 34), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens['''input_ids''']))
                self.assertEqual([e[0] for e in expected_results], tokens['''offset_mapping'''])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ['''的''', '''人''', '''有''']
        text_with_chinese_char = ''''''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                kwargs['''tokenize_chinese_chars'''] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs['''tokenize_chinese_chars'''] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F"""##{token}""" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 230 | 0 |
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """
    Applies the logistic sigmoid 1 / (1 + e^(-x)) elementwise.

    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
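    # Note (addition, not in the original snippet): for large negative inputs,
    # np.exp(-vector) can overflow. A numerically safer formulation is:
    #     np.where(vector >= 0, 1 / (1 + np.exp(-vector)), np.exp(vector) / (1 + np.exp(vector)))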
| 282 |
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """
    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    >>> evaluate_postfix([])
    0
    """
    if not postfix_notation:
        return 0

    operations = {'+', '-', '*', '/'}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # floor division truncated toward zero for mixed-sign operands
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 282 | 1 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse('''0.11.0''').release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type='''dataset''', revision=revision)
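# Example (hypothetical repository id, for illustration only):
#     hf_hub_url("user/my_dataset", "data/train.csv", revision="main")
# resolves to:
#     https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train.csv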
| 33 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = '''examples/'''
REPLACE_PATTERNS = {
    '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
    '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
    '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
    '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
    '''init''': '''src/transformers/__init__.py''',
    '''setup''': '''setup.py''',
}
README_FILE = '''README.md'''
def update_version_in_file(fname: str, version: str, pattern: str):
    with open(fname, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('''VERSION''', version)
    code = re_pattern.sub(replace, code)
    with open(fname, '''w''', encoding='''utf-8''', newline='''\n''') as f:
        f.write(code)


def update_version_in_examples(version: str):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''')
        if "legacy" in directories:
            directories.remove('''legacy''')
        for fname in fnames:
            if fname.endswith('''.py'''):
                update_version_in_file(os.path.join(folder, fname), version, pattern='''examples''')


def global_version_update(version: str, patch: bool = False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith('''1.'''):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/transformers/main/model_doc''', '''https://huggingface.co/docs/transformers/model_doc''', )
        index += 1

    with open(README_FILE, '''w''', encoding='''utf-8''', newline='''\n''') as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    with open(REPLACE_FILES['''init'''], '''r''') as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['''init'''][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch: bool = False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''')
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = F'''{default_version.major}.{default_version.minor + 1}.0'''

    # Now let's ask nicely if that's the right one.
    version = input(F'''Which version are you releasing? [{default_version}]''')
    if len(version) == 0:
        version = default_version
    print(F'''Updating version to {version}.''')
    global_version_update(version, patch=patch)
    if not patch:
        print('''Cleaning main README, don\'t forget to run `make fix-copies`.''')
        clean_main_ref_in_model_list()


def post_release_work():
    current_version = get_version()
    dev_version = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(F'''Which version are we developing now? [{dev_version}]''')
    if len(version) == 0:
        version = dev_version
    print(F'''Updating version to {version}.''')
    global_version_update(version)
    print('''Cleaning main README, don\'t forget to run `make fix-copies`.''')
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 33 | 1 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """
    Greedy fractional knapsack: take items by value density until capacity w is filled.

    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["""hidden_states"""][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["""hidden_states"""][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1E-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            """feature-extraction""": GPTNeoXModel,
            """question-answering""": GPTNeoXForQuestionAnswering,
            """text-classification""": GPTNeoXForSequenceClassification,
            """text-generation""": GPTNeoXForCausalLM,
            """token-classification""": GPTNeoXForTokenClassification,
            """zero-shot""": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="""Feed forward chunking is not implemented""")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("""linear""",), ("""dynamic""",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"""type""": scaling_type, """factor""": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1E-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("""My favorite food is""", return_tensors="""pt""").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
| 87 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''allenai/led-base-16384''': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, specifically avoiding
    the whitespace/control characters the bpe code barfs on.
    """
    bs = (
        list(range(ord("""!"""), ord("""~""") + 1)) + list(range(ord("""¡"""), ord("""¬""") + 1)) + list(range(ord("""®"""), ord("""ÿ""") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word, where the word is a tuple
    of (variable-length string) symbols.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
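# Example: get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}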
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs) -> Optional[int]:
        '''simple docstring'''
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )

        with open(vocab_file, encoding="""utf-8""") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="""utf-8""") as merges_handle:
            bpe_merges = merges_handle.read().split("""\n""")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text: str) -> List[str]:
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
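

# Illustrative usage sketch of the `_pad` override above (not part of the original
# module): `global_attention_mask` is padded with -1 because 0 already means
# "local attention" rather than "do not attend".
#
#     tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#     enc = tokenizer("Hello world")
#     enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
#     padded = tokenizer.pad(enc, padding="max_length", max_length=10)
#     # padded["global_attention_mask"] now ends with -1 entries up to length 10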
| 254 |
"""
Project Euler Problem 116: https://projecteuler.net/problem=116

Count the ways to replace tiles in a row of `length` grey squares with coloured
oblong tiles of length two (red), three (green) or four (blue), using at least
one coloured tile and a single colour at a time.
"""


def solution(length: int = 50) -> int:
    # different_colour_ways_number[row_length][tile_length - 2] counts the fillings
    # of a row of `row_length` units that use at least one tile of size `tile_length`
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(F'''{solution() = }''')
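

# Minimal cross-check of `solution` against a direct one-colour recurrence
# (an illustrative sketch; `_count_one_colour` is not part of the original file).
def _count_one_colour(row: int, tile: int) -> int:
    # ways[n] counts fillings of n units with grey squares and `tile`-unit blocks,
    # including the all-grey filling, which is subtracted at the end
    ways = [0] * (row + 1)
    for n in range(row + 1):
        ways[n] = 1 if n < tile else ways[n - 1] + ways[n - tile]
    return ways[row] - 1


if __name__ == "__main__":
    # Project Euler 116 quotes 12 ways for a row of five units: 7 red + 3 green + 2 blue
    assert solution(5) == sum(_count_one_colour(5, tile) for tile in (2, 3, 4)) == 12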
| 234 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code], suffix=[eos]
        (or, in legacy mode, no prefix and suffix=[eos, src_lang_code])."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: prefix=[tgt_lang_code], suffix=[eos]
        (or, in legacy mode, no prefix and suffix=[eos, tgt_lang_code])."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
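

# Illustrative usage sketch (not part of the original module): language codes select
# the special tokens on both the source and the target side.
#
#     tokenizer = NllbTokenizerFast.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     inputs = tokenizer("Hello world", return_tensors="pt")
#     # in recent transformers versions, target text can be passed directly:
#     labels = tokenizer(text_target="Bonjour le monde", return_tensors="pt")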
return (out_vocab_file,)
| 365 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word represented as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs


class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")
        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                word = tuple(new_word)
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into tokens using BPE."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
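

# Illustrative usage sketch (not part of the original module): subword continuations
# are marked with a trailing "@@", which `convert_tokens_to_string` strips again.
#
#     tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#     tokens = tokenizer.tokenize("hello world")  # whole words or "@@"-suffixed pieces
#     text = tokenizer.convert_tokens_to_string(tokens)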
| 339 | 0 |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APX-algorithm for the minimum vertex cover problem.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 7 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowercase_ = "true"
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=82 , SCREAMING_SNAKE_CASE__ : Optional[int]=16 ) -> Optional[Any]:
'''simple docstring'''
set_seed(42 )
A__ = RegressionModel()
A__ = deepcopy(SCREAMING_SNAKE_CASE__ )
A__ = RegressionDataset(length=SCREAMING_SNAKE_CASE__ )
A__ = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ )
model.to(accelerator.device )
A__ , A__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
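

# The core pattern this script exercises, as a minimal sketch (names are illustrative):
# `Accelerator.gather_for_metrics` gathers tensors across processes and drops the
# duplicated samples that data parallelism appends to the last batch, so metric
# counts match `len(dataset)` exactly.
#
#     accelerator = Accelerator()
#     model, dataloader = accelerator.prepare(model, dataloader)
#     for batch in dataloader:
#         with torch.no_grad():
#             preds = model(batch["input"]).argmax(dim=-1)
#         preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
#         metric.add_batch(predictions=preds, references=refs)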
| 7 | 1 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
__snake_case : str = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
__snake_case : Tuple = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
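
# Example invocation (the script file name, paths and base model name below are
# placeholders, not part of the original file):
#
#     python convert_s3prl_checkpoint.py \
#         --base_model_name microsoft/unispeech-sat-base \
#         --config_path ./config.json \
#         --checkpoint_path ./s3prl_checkpoint.ckpt \
#         --model_dump_path ./converted_model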
| 355 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )
    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        code_style = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=code_style)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
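

# For reference, the mechanism under test (a sketch): a class body annotated with
#
#     # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel
#
# must stay identical to the referenced source after applying the rename; otherwise
# `check_copies.is_copy_consistent` reports the drift (and, with overwrite=True,
# rewrites the copy to match the source again).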
| 136 | 0 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """Seed all random number generators for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of models weights
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True
        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]
        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]
        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]
        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])
        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")
        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")
        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)
        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)
    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
        parameters = list(parameters)
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]
    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }
    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        # deepcopy, to be consistent with the module API
        state_dict = copy.deepcopy(state_dict)
        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")
        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")
        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")
        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")
        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")
        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")
        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")
        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
| 18 |
from __future__ import annotations
def __snake_case ( _lowerCAmelCase : list[float] ) -> bool:
if len(_lowerCAmelCase ) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
    if any(i <= 0 for i in _lowerCAmelCase ):
raise ValueError("All values must be greater than 0" )
    A_ : list[float] = _lowerCAmelCase.copy()
    A_.sort()
    return A_[-1] < sum(A_[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300 | 0 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase: int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
UpperCamelCase__ = MaskFormerConfig(backbone_config=_UpperCamelCase )
UpperCamelCase__ = "huggingface/label-files"
if "ade20k-full" in model_name:
        # ADE20K-full has 847 classes
UpperCamelCase__ = 8_47
UpperCamelCase__ = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
        # standard ADE20K has 150 classes
UpperCamelCase__ = 1_50
UpperCamelCase__ = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
        # COCO-Stuff has 171 classes
UpperCamelCase__ = 1_71
UpperCamelCase__ = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
UpperCamelCase__ = 1_33
UpperCamelCase__ = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
        # Cityscapes has 19 semantic classes
UpperCamelCase__ = 19
UpperCamelCase__ = "cityscapes-id2label.json"
elif "vistas" in model_name:
        # Mapillary Vistas has 65 classes
UpperCamelCase__ = 65
UpperCamelCase__ = "mapillary-vistas-id2label.json"
UpperCamelCase__ = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="dataset" ) , "r" ) )
UpperCamelCase__ = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
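        # the original checkpoint numbers the FPN adapters/layers 3..1 top-down,
        # while the HF implementation numbers them 0..2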
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = dct.pop(_UpperCamelCase )
UpperCamelCase__ = val
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[int] , _UpperCamelCase : int ) -> int:
'''simple docstring'''
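    # Swin doubles the channel dimension at every stage: stage i has embed_dim * 2**i features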
UpperCamelCase__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCamelCase__ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCamelCase__ = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
UpperCamelCase__ = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
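            # the fused qkv matrix has shape (3 * dim, dim): rows [0:dim] hold the query projection,
            # rows [dim:2*dim] the key projection and rows [2*dim:3*dim] the value projection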
UpperCamelCase__ = in_proj_weight[:dim, :]
UpperCamelCase__ = in_proj_bias[: dim]
UpperCamelCase__ = in_proj_weight[
dim : dim * 2, :
]
UpperCamelCase__ = in_proj_bias[
dim : dim * 2
]
UpperCamelCase__ = in_proj_weight[
-dim :, :
]
UpperCamelCase__ = in_proj_bias[-dim :]
# fmt: on
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : Optional[Any] ) -> str:
'''simple docstring'''
    # fmt: off
    UpperCamelCase__ = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCamelCase__ = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
UpperCamelCase__ = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
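        # torch.nn.MultiheadAttention stores q/k/v as one fused in_proj matrix of shape
        # (3 * hidden_size, hidden_size); slice it into the separate HF q/k/v projections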
UpperCamelCase__ = in_proj_weight[: hidden_size, :]
UpperCamelCase__ = in_proj_bias[:config.hidden_size]
UpperCamelCase__ = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCamelCase__ = in_proj_bias[hidden_size : hidden_size * 2]
UpperCamelCase__ = in_proj_weight[-hidden_size :, :]
UpperCamelCase__ = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCamelCase__ = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
UpperCamelCase__ = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ = in_proj_weight[: hidden_size, :]
UpperCamelCase__ = in_proj_bias[:config.hidden_size]
UpperCamelCase__ = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCamelCase__ = in_proj_bias[hidden_size : hidden_size * 2]
UpperCamelCase__ = in_proj_weight[-hidden_size :, :]
UpperCamelCase__ = in_proj_bias[-hidden_size :]
# fmt: on
def SCREAMING_SNAKE_CASE__( ) -> torch.Tensor:
'''simple docstring'''
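    # standard COCO val2017 image (two cats on a couch) used as a sanity check in HF conversion scripts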
UpperCamelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCamelCase__ = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : bool = False ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = get_maskformer_config(_UpperCamelCase )
# load original state_dict
with open(_UpperCamelCase , "rb" ) as f:
UpperCamelCase__ = pickle.load(_UpperCamelCase )
UpperCamelCase__ = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
UpperCamelCase__ = create_rename_keys(_UpperCamelCase )
for src, dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
read_in_swin_q_k_v(_UpperCamelCase , config.backbone_config )
read_in_decoder_q_k_v(_UpperCamelCase , _UpperCamelCase )
# update to torch tensors
for key, value in state_dict.items():
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
# load 🤗 model
UpperCamelCase__ = MaskFormerForInstanceSegmentation(_UpperCamelCase )
model.eval()
for name, param in model.named_parameters():
print(_UpperCamelCase , param.shape )
UpperCamelCase__ , UpperCamelCase__ = model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(_UpperCamelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
UpperCamelCase__ = prepare_img()
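    # each dataset uses a different sentinel value for pixels that should be ignored in the loss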
if "vistas" in model_name:
UpperCamelCase__ = 65
elif "cityscapes" in model_name:
UpperCamelCase__ = 6_55_35
else:
UpperCamelCase__ = 2_55
UpperCamelCase__ = True if "ade" in model_name else False
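    # for ADE20k, reduce_labels shifts all labels down by one so the background class (0)
    # is mapped to the ignore index during preprocessing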
UpperCamelCase__ = MaskFormerImageProcessor(ignore_index=_UpperCamelCase , reduce_labels=_UpperCamelCase )
UpperCamelCase__ = image_processor(_UpperCamelCase , return_tensors="pt" )
UpperCamelCase__ = model(**_UpperCamelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
UpperCamelCase__ = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
image_processor.save_pretrained(_UpperCamelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
__lowercase: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__lowercase: Union[str, Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 31 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 31 | 1 |