| code (string, 82–53.2k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
import copy
import os
from typing import TYPE_CHECKING, List, Union


if TYPE_CHECKING:
    pass

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}


class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
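# A minimal composition sketch (added for illustration; the keyword values are
# the defaults shown above, not values required by any checkpoint):
#
#     text_config = AlignTextConfig(vocab_size=30522)
#     vision_config = AlignVisionConfig(image_size=600)
#     config = AlignConfig.from_text_vision_configs(text_config, vision_config, projection_dim=640)
#     assert config.to_dict()["model_type"] == "align"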
| 307 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
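# Usage note (added): thanks to the `_LazyModule` registered above, a statement
# such as
#
#     from transformers.models.swin import SwinConfig
#
# only imports `configuration_swin` on first access, so the heavy torch/TF
# modeling files are not loaded until one of their names is actually requested.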
| 74 | 0 |
"""simple docstring"""
from math import isclose, sqrt
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = point_y / 4 / point_x
UpperCAmelCase_ = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
UpperCAmelCase_ = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
UpperCAmelCase_ = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
UpperCAmelCase_ = outgoing_gradient**2 + 4
UpperCAmelCase_ = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
UpperCAmelCase_ = (point_y - outgoing_gradient * point_x) ** 2 - 100
UpperCAmelCase_ = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
UpperCAmelCase_ = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
UpperCAmelCase_ = x_minus if isclose(lowerCAmelCase__ , lowerCAmelCase__ ) else x_plus
UpperCAmelCase_ = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def a__ ( lowerCAmelCase__ = 1.4 , lowerCAmelCase__ = -9.6 ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = first_x_coord
UpperCAmelCase_ = first_y_coord
UpperCAmelCase_ = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = next_point(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"{solution() = }")
| 14 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = x
UpperCAmelCase_ = y
for step in range(lowerCAmelCase__ ): # noqa: B007
UpperCAmelCase_ = a * a - b * b + x
UpperCAmelCase_ = 2 * a * b + y
UpperCAmelCase_ = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def a__ ( lowerCAmelCase__ ):
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def a__ ( lowerCAmelCase__ ):
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(lowerCAmelCase__ , 1 , 1 ) )
def a__ ( lowerCAmelCase__ = 800 , lowerCAmelCase__ = 600 , lowerCAmelCase__ = -0.6 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 3.2 , lowerCAmelCase__ = 50 , lowerCAmelCase__ = True , ):
UpperCAmelCase_ = Image.new("RGB" , (image_width, image_height) )
UpperCAmelCase_ = img.load()
# loop through the image-coordinates
for image_x in range(lowerCAmelCase__ ):
for image_y in range(lowerCAmelCase__ ):
# determine the figure-coordinates based on the image-coordinates
UpperCAmelCase_ = figure_width / image_width * image_height
UpperCAmelCase_ = figure_center_x + (image_x / image_width - 0.5) * figure_width
UpperCAmelCase_ = figure_center_y + (image_y / image_height - 0.5) * figure_height
UpperCAmelCase_ = get_distance(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
UpperCAmelCase_ = get_color_coded_rgb(lowerCAmelCase__ )
else:
UpperCAmelCase_ = get_black_and_white_rgb(lowerCAmelCase__ )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowerCamelCase = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 14 | 1 |
"""
Find the smallest integer for which the proportion of "perfect" partitions
(those whose exponent test below yields a power of two) first falls below the
given threshold.
"""
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
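# A worked check of `check_partition_perfect`, added for illustration:
#     k = 2 -> sqrt(4*2 + 1) / 2 + 1 / 2 == 2.0 == 2**1, so the exponent is
#              integral and the partition counts as perfect;
#     k = 6 -> sqrt(4*6 + 1) / 2 + 1 / 2 == 3.0, and log2(3) is not an
#              integer, so the partition is not perfect.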
| 18 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 | 1 |
"""Tokenization class for Blenderbot."""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
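# A minimal usage sketch of the two helpers above (added; the values shown are
# what the functions as written return, although set ordering may vary):
#
#     >>> get_pairs(("h", "e", "l", "l", "o"))
#     {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}
#     >>> bytes_to_unicode()[ord(" ")]
#     'Ġ'
#
# Every byte maps to a printable character, so the BPE step never has to
# handle raw spaces or control bytes directly.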
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation) -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 701 |
from copy import deepcopy
from typing import Optional, Union

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available


if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )

        # pop arguments that are not used in the foward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor

    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor

    def _pad_points_and_labels(self, input_points, input_labels):
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False):
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords

    def _check_and_preprocess_points(
        self,
        input_points=None,
        input_labels=None,
        input_boxes=None,
    ):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
| 493 | 0 |
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
| 37 |
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read the contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
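# A minimal usage sketch (added; assumes a local "file.txt.gz" exists). The
# `cat` method above ignores its path argument and simply returns the single
# decompressed file:
#
#     fs = GzipFileSystem(fo="file.txt.gz")
#     data = fs.cat("file.txt")  # bytes of the decompressed file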
| 37 | 1 |
import subprocess
import sys

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this must be loaded before socket.socket is monkey-patched in `mock`
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
"""

        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
"""

        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 704 |
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num_b, num_a)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 8 | 0 |
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class A_ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self: List[str] ,__lowerCAmelCase: float ,__lowerCAmelCase: Callable ,__lowerCAmelCase: int ,__lowerCAmelCase: float = 1.0 ,__lowerCAmelCase: str = None ,):
'''simple docstring'''
super().__init__()
_lowerCamelCase : str = initial_learning_rate
_lowerCamelCase : Optional[Any] = warmup_steps
_lowerCamelCase : Dict = power
_lowerCamelCase : List[Any] = decay_schedule_fn
_lowerCamelCase : Tuple = name
def __call__( self: List[str] ,__lowerCAmelCase: int ):
'''simple docstring'''
with tf.name_scope(self.name or "WarmUp" ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
_lowerCamelCase : List[Any] = tf.cast(__lowerCAmelCase ,tf.floataa )
_lowerCamelCase : Optional[int] = tf.cast(self.warmup_steps ,tf.floataa )
_lowerCamelCase : Union[str, Any] = global_step_float / warmup_steps_float
_lowerCamelCase : Dict = self.initial_learning_rate * tf.math.pow(__lowerCAmelCase ,self.power )
return tf.cond(
global_step_float < warmup_steps_float ,lambda: warmup_learning_rate ,lambda: self.decay_schedule_fn(step - self.warmup_steps ) ,name=__lowerCAmelCase ,)
def _lowercase ( self: Any ):
'''simple docstring'''
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0.0 , _lowerCamelCase = 0.9 , _lowerCamelCase = 0.9_9_9 , _lowerCamelCase = 1e-8 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = 0.0 , _lowerCamelCase = 1.0 , _lowerCamelCase = None , ) -> Any:
'''simple docstring'''
_lowerCamelCase : Dict = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=_lowerCamelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_lowerCamelCase , )
if num_warmup_steps:
_lowerCamelCase : List[str] = WarmUp(
initial_learning_rate=_lowerCamelCase , decay_schedule_fn=_lowerCamelCase , warmup_steps=_lowerCamelCase , )
if weight_decay_rate > 0.0:
_lowerCamelCase : List[str] = AdamWeightDecay(
learning_rate=_lowerCamelCase , weight_decay_rate=_lowerCamelCase , beta_a=_lowerCamelCase , beta_a=_lowerCamelCase , epsilon=_lowerCamelCase , clipnorm=_lowerCamelCase , global_clipnorm=_lowerCamelCase , exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"] , include_in_weight_decay=_lowerCamelCase , )
else:
_lowerCamelCase : Optional[int] = tf.keras.optimizers.Adam(
learning_rate=_lowerCamelCase , beta_a=_lowerCamelCase , beta_a=_lowerCamelCase , epsilon=_lowerCamelCase , clipnorm=_lowerCamelCase , global_clipnorm=_lowerCamelCase , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
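# A short usage sketch of `create_optimizer` (added; the hyperparameters are
# illustrative, not prescribed):
#
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=5e-5,
#         num_train_steps=10_000,
#         num_warmup_steps=500,
#         weight_decay_rate=0.01,  # > 0.0 selects AdamWeightDecay below
#     )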
class AdamWeightDecay(Adam):
    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True


class GradientAccumulator:
    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
| 46 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase = 100 ) -> int:
'''simple docstring'''
_lowerCamelCase : List[str] = set()
_lowerCamelCase : Optional[Any] = 0
_lowerCamelCase : Optional[int] = n + 1 # maximum limit
for a in range(2 , _lowerCamelCase ):
for b in range(2 , _lowerCamelCase ):
_lowerCamelCase : List[str] = a**b # calculates the current power
collect_powers.add(_lowerCamelCase ) # adds the result to the set
return len(_lowerCamelCase )
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip()))) | 46 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
A = logging.get_logger(__name__)
class SimpleImageProcessor(BaseImageProcessor):
    """Image processor with resize, center-crop, rescale and normalize steps, plus
    semantic-segmentation post-processing."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        # Resize so that the shortest edge of the image matches size["shortest_edge"].
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation | 702 |
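# A minimal, self-contained sketch of the post-processing step above: logits of shape
# (batch, num_labels, h, w) are upsampled to each target size, then argmaxed over the
# class dimension to get one label id per pixel. The shapes and sizes here are made up.
import torch

logits = torch.randn(2, 19, 32, 32)            # fake (batch, num_labels, h, w) logits
target_sizes = [(64, 64), (128, 96)]           # one (height, width) per image
maps = []
for idx in range(len(logits)):
    resized = torch.nn.functional.interpolate(
        logits[idx].unsqueeze(0), size=target_sizes[idx], mode="bilinear", align_corners=False
    )
    maps.append(resized[0].argmax(dim=0))      # (height, width) tensor of label ids
print(maps[0].shape, maps[1].shape)            # torch.Size([64, 64]) torch.Size([128, 96])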
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    """Character-level SpeechT5 tokenizer backed by a SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,) | 97 | 0 |
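# Pure-Python sketch of the special-tokens mask logic in the tokenizer above: every
# regular token gets a 0 and the single appended </s> gets a 1. No SentencePiece model
# is needed; the token ids are made up.
def special_tokens_mask(token_ids_0, token_ids_1=None):
    suffix_ones = [1]  # the trailing eos token
    if token_ids_1 is None:
        return [0] * len(token_ids_0) + suffix_ones
    return [0] * len(token_ids_0) + [0] * len(token_ids_1) + suffix_ones

print(special_tokens_mask([5, 8, 13]))        # [0, 0, 0, 1]
print(special_tokens_mask([5, 8], [13, 21]))  # [0, 0, 0, 0, 1]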
'''simple docstring'''
import math
def perfect_square(num: int) -> bool:
    """Check whether ``num`` is a perfect square via math.sqrt."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check whether ``n`` is a perfect square using binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 |
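# Quick usage check for the two implementations above (assumes both functions are in
# scope); the inputs are arbitrary example values.
for n in (0, 1, 2, 16, 24, 25, 10_000):
    assert perfect_square(n) == perfect_square_binary_search(n)
print(perfect_square_binary_search(25))  # True
print(perfect_square_binary_search(26))  # False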
'''simple docstring'''
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operations."""
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
| 292 | 0 |
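# Worked trace of the carry loop above for add(5, 3): the AND picks out the carry bits,
# the XOR adds without carrying, and the left shift moves the carry into place.
first, second = 5, 3
step = 0
while second != 0:
    carry = first & second
    first ^= second
    second = carry << 1
    step += 1
    print(f"step {step}: first={first}, second={second}")
# step 1: first=6, second=2
# step 2: first=4, second=4
# step 3: first=0, second=8
# step 4: first=8, second=0
print(first)  # 8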
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
_A : Tuple = DDIMPipeline
_A : List[str] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_A : Optional[int] = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
_A : Tuple = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
_A : Union[str, Any] = False
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
__UpperCAmelCase = DDIMScheduler()
__UpperCAmelCase = {'unet': unet, 'scheduler': scheduler}
return components
def __lowerCamelCase ( self , __A , __A=0 ):
if str(__A ).startswith('mps' ):
__UpperCAmelCase = torch.manual_seed(__A )
else:
__UpperCAmelCase = torch.Generator(device=__A ).manual_seed(__A )
__UpperCAmelCase = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
__UpperCAmelCase = 'cpu'
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__UpperCAmelCase = self.get_dummy_inputs(__A )
__UpperCAmelCase = pipe(**__A ).images
__UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
__UpperCAmelCase = np.array(
[1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
__UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__A , 1E-3 )
def __lowerCamelCase ( self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowerCamelCase ( self ):
super().test_save_load_local(expected_max_difference=3E-3 )
def __lowerCamelCase ( self ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __lowerCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
__UpperCAmelCase = 'google/ddpm-cifar10-32'
__UpperCAmelCase = UNetaDModel.from_pretrained(__A )
__UpperCAmelCase = DDIMScheduler()
__UpperCAmelCase = DDIMPipeline(unet=__A , scheduler=__A )
ddim.to(__A )
ddim.set_progress_bar_config(disable=__A )
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = ddim(generator=__A , eta=0.0 , output_type='numpy' ).images
__UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
__UpperCAmelCase = 'google/ddpm-ema-bedroom-256'
__UpperCAmelCase = UNetaDModel.from_pretrained(__A )
__UpperCAmelCase = DDIMScheduler.from_pretrained(__A )
__UpperCAmelCase = DDIMPipeline(unet=__A , scheduler=__A )
ddpm.to(__A )
ddpm.set_progress_bar_config(disable=__A )
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = ddpm(generator=__A , output_type='numpy' ).images
__UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__UpperCAmelCase = np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 617 |
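# Hedged sketch of the pattern the slow tests above exercise: build a DDIMPipeline from a
# pretrained UNet plus a fresh DDIMScheduler, then sample with a fixed seed. The checkpoint
# name mirrors the test; running this needs diffusers installed and downloads the weights.
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
images = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images
print(images.shape)  # (1, 32, 32, 3)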
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over elements of ``nums`` with no two adjacent."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 617 | 1 |
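# Worked example of the rolling pair above on [1, 2, 3, 4, 5]: at each element we either
# take it on top of the best sum that excluded the previous element, or skip it.
nums = [1, 2, 3, 4, 5]
max_including, max_excluding = nums[0], 0
for num in nums[1:]:
    max_including, max_excluding = max_excluding + num, max(max_including, max_excluding)
    print(num, max_including, max_excluding)
# 2 2 1
# 3 4 2
# 4 6 4
# 5 9 6
print(max(max_including, max_excluding))  # 9, i.e. 1 + 3 + 5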
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def A_( A : Optional[int] , A : Optional[Any]):
UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
UpperCamelCase = Image.open(requests.get(A , stream=A).raw).convert('RGB')
UpperCamelCase = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073) , (0.26_862_954, 0.26_130_258, 0.27_577_711)),
])
UpperCamelCase = transform(A).unsqueeze(0).to(A)
return image
def A_( A : Any):
if "visual_encoder" in key:
UpperCamelCase = re.sub('visual_encoder*' , 'vision_model.encoder' , A)
if "blocks" in key:
UpperCamelCase = re.sub(r'blocks' , 'layers' , A)
if "attn" in key:
UpperCamelCase = re.sub(r'attn' , 'self_attn' , A)
if "norm1" in key:
UpperCamelCase = re.sub(r'norm1' , 'layer_norm1' , A)
if "norm2" in key:
UpperCamelCase = re.sub(r'norm2' , 'layer_norm2' , A)
if "encoder.norm" in key:
UpperCamelCase = re.sub(r'encoder.norm' , 'post_layernorm' , A)
if "encoder.patch_embed.proj" in key:
UpperCamelCase = re.sub(r'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , A)
if "encoder.pos_embed" in key:
UpperCamelCase = re.sub(r'encoder.pos_embed' , 'embeddings.position_embedding' , A)
if "encoder.cls_token" in key:
UpperCamelCase = re.sub(r'encoder.cls_token' , 'embeddings.class_embedding' , A)
if "self_attn" in key:
UpperCamelCase = re.sub(r'self_attn.proj' , 'self_attn.projection' , A)
return key
@torch.no_grad()
def A_( A : List[str] , A : Any=None):
if config_path is not None:
UpperCamelCase = BlipConfig.from_pretrained(A)
else:
UpperCamelCase = BlipConfig(projection_dim=512 , text_config={} , vision_config={})
UpperCamelCase = BlipForConditionalGeneration(A).eval()
UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
UpperCamelCase = blip_decoder(pretrained=A , image_size=384 , vit='base')
UpperCamelCase = pt_model.eval()
UpperCamelCase = pt_model.state_dict()
for key in modified_state_dict.copy():
UpperCamelCase = modified_state_dict.pop(A)
UpperCamelCase = rename_key(A)
UpperCamelCase = value
hf_model.load_state_dict(A)
UpperCamelCase = 384
UpperCamelCase = load_demo_image(image_size=A , device='cpu')
UpperCamelCase = BertTokenizer.from_pretrained('bert-base-uncased')
UpperCamelCase = tokenizer(['a picture of']).input_ids
UpperCamelCase = hf_model.generate(A , A)
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
UpperCamelCase = hf_model.generate(A)
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(A)
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
UpperCamelCase = (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
UpperCamelCase = blip_vqa(pretrained=A , image_size=A , vit='base')
vqa_model.eval()
UpperCamelCase = vqa_model.state_dict()
for key in modified_state_dict.copy():
UpperCamelCase = modified_state_dict.pop(A)
UpperCamelCase = rename_key(A)
UpperCamelCase = value
UpperCamelCase = BlipForQuestionAnswering(A)
hf_vqa_model.load_state_dict(A)
UpperCamelCase = ['How many dogs are in this image?']
UpperCamelCase = tokenizer(A , return_tensors='pt').input_ids
UpperCamelCase = hf_vqa_model.generate(A , A)
print(tokenizer.decode(answer[0]))
assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa')
UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
UpperCamelCase = blip_itm(pretrained=A , image_size=A , vit='base')
itm_model.eval()
UpperCamelCase = itm_model.state_dict()
for key in modified_state_dict.copy():
UpperCamelCase = modified_state_dict.pop(A)
UpperCamelCase = rename_key(A)
UpperCamelCase = value
UpperCamelCase = BlipForImageTextRetrieval(A)
UpperCamelCase = ['A picture of a woman with a dog sitting in a beach']
UpperCamelCase = tokenizer(
A , return_tensors='pt' , padding='max_length' , truncation=A , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(A)
hf_itm_model.eval()
UpperCamelCase = hf_itm_model(A , A , use_itm_head=A)
UpperCamelCase = hf_itm_model(A , A , use_itm_head=A)
assert out[0].item() == 0.2_110_687_494_277_954
assert torch.nn.functional.softmax(out_itm[0] , dim=1)[:, 1].item() == 0.45_698_845_386_505_127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm')
if __name__ == "__main__":
lowerCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 3 |
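# Minimal sketch of the state-dict key renaming idea used in the conversion script above:
# apply a chain of regex substitutions to every key while keeping the tensor values.
# The rules and the example key below are illustrative, not the full BLIP rule set.
import re

RULES = [
    (r"visual_encoder", "vision_model.encoder"),
    (r"blocks", "layers"),
    (r"attn", "self_attn"),
]

def rename_key(key: str) -> str:
    for pattern, repl in RULES:
        key = re.sub(pattern, repl, key)
    return key

state_dict = {"visual_encoder.blocks.0.attn.qkv.weight": "tensor..."}
renamed = {rename_key(k): v for k, v in state_dict.items()}
print(list(renamed))  # ['vision_model.encoder.layers.0.self_attn.qkv.weight']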
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 391 | 0 |
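# Hedged, standalone sketch of the optional-dependency pattern above, outside transformers:
# probe for a backend at import time and only register the names that can actually be
# imported. Module and class names here are hypothetical.
_import_structure = {"configuration": ["MyConfig"]}

try:
    import torch  # noqa: F401
    _torch_available = True
except ImportError:
    _torch_available = False

if _torch_available:
    _import_structure["modeling"] = ["MyModel"]
print(_import_structure)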
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class snake_case ( unittest.TestCase ):
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
a : Optional[int] = mock.Mock()
a : int = 5_0_0
a : Dict = {}
a : Dict = HTTPError
a : Dict = {}
# Download this model to make sure it's in the cache.
a : List[Any] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A ) as mock_head:
a : Tuple = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
a : str = mock.Mock()
a : Tuple = 5_0_0
a : Any = {}
a : Optional[Any] = HTTPError
a : Any = {}
# Download this model to make sure it's in the cache.
a : str = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A ) as mock_head:
a : Any = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
try:
a : Any = tempfile.mktemp()
with open(A , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , A )
a : Union[str, Any] = AlbertTokenizer.from_pretrained(A )
finally:
os.remove(A )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , A )
a : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_0_0_0 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
a : Union[str, Any] = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class snake_case ( unittest.TestCase ):
__magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def lowerCamelCase__ ( cls : List[Any] ):
'''simple docstring'''
a : Optional[Any] = TOKEN
HfFolder.save_token(A )
@classmethod
def lowerCamelCase__ ( cls : Tuple ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
a : Tuple = os.path.join(A , 'vocab.txt' )
with open(A , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
a : List[Any] = BertTokenizer(A )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
a : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A , repo_id='test-tokenizer' , push_to_hub=A , use_auth_token=self._token )
a : Optional[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
a : Optional[int] = os.path.join(A , 'vocab.txt' )
with open(A , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
a : int = BertTokenizer(A )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
a : int = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
A , repo_id='valid_org/test-tokenizer-org' , push_to_hub=A , use_auth_token=self._token )
a : List[Any] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
a : int = os.path.join(A , 'vocab.txt' )
with open(A , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
a : Union[str, Any] = CustomTokenizer(A )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
a : str = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
a : Optional[Any] = os.path.join(A , 'vocab.txt' )
with open(A , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
a : str = BertTokenizerFast.from_pretrained(A )
bert_tokenizer.save_pretrained(A )
a : List[str] = CustomTokenizerFast.from_pretrained(A )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
a : str = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
a : str = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' , use_fast=A , trust_remote_code=A )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class snake_case ( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
a : int = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
a : List[str] = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
a : Tuple = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
a : List[str] = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
a : int = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
a : Optional[Any] = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
a : Optional[Any] = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
a : int = Trie()
a : List[Any] = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(A , ['AB', 'C'] )
| 118 |
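# A minimal nested-dict trie matching the shape asserted in the Trie tests above: each
# character maps to a sub-dict, and the empty-string key marks the end of an inserted word.
def trie_add(data: dict, word: str) -> None:
    node = data
    for ch in word:
        node = node.setdefault(ch, {})
    node[""] = 1

data: dict = {}
trie_add(data, "Hello")
print(data)  # {'H': {'e': {'l': {'l': {'o': {'': 1}}}}}}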
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 118 | 1 |
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """Solve for whichever of conductivity, electron concentration or mobility is given as 0."""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 484 |
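# Worked example of the solver above, with made-up SI values: given conductivity and
# electron concentration, it returns the name and value of the missing quantity,
# here mobility = conductivity / (electron_conc * ELECTRON_CHARGE).
name, value = electric_conductivity(conductivity=25, electron_conc=100, mobility=0)
print(name, f"{value:.3e}")  # mobility 1.560e+18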
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    """Spearman rank-order correlation metric."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(predictions, references)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 484 | 1 |
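# Direct use of the underlying scipy call the metric above wraps, with the example data
# from the metric's own docstring.
from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 2), round(pvalue, 2))  # -0.7 0.19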
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint-set node storing the data, its parent and its rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation: attach the lower-rank root to the higher
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm: sort edges by weight, then add each edge whose endpoints
        # are still in different disjoint sets
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
| 719 |
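# Usage sketch for the (renamed) classes above: build a small weighted graph and
# extract its minimum spanning tree; the heavy 1-3 edge should be dropped.
g = GraphUndirectedWeighted[int]()
g.add_edge(1, 2, 1)
g.add_edge(2, 3, 2)
g.add_edge(1, 3, 10)
mst = g.kruskal()
print(mst.connections)  # {1: {2: 1}, 2: {1: 1, 3: 2}, 3: {2: 2}}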
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self : Any , __snake_case : int , __snake_case : Optional[Any]=13 , __snake_case : int=7 , __snake_case : Dict=True , __snake_case : str=True , __snake_case : List[str]=True , __snake_case : int=True , __snake_case : str=99 , __snake_case : Dict=24 , __snake_case : int=2 , __snake_case : Dict=6 , __snake_case : str=37 , __snake_case : str="gelu" , __snake_case : List[str]=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Optional[int]=512 , __snake_case : Any=16 , __snake_case : Optional[int]=2 , __snake_case : List[Any]=0.02 , __snake_case : str=3 , __snake_case : List[Any]=None , __snake_case : Any=1000 , ) -> str:
_a : Dict = parent
_a : Tuple = batch_size
_a : Optional[int] = seq_length
_a : Optional[int] = is_training
_a : Dict = use_input_mask
_a : Optional[Any] = use_token_type_ids
_a : List[Any] = use_labels
_a : List[str] = vocab_size
_a : int = hidden_size
_a : List[str] = num_hidden_layers
_a : Optional[Any] = num_attention_heads
_a : Dict = intermediate_size
_a : str = hidden_act
_a : str = hidden_dropout_prob
_a : Union[str, Any] = attention_probs_dropout_prob
_a : Tuple = max_position_embeddings
_a : List[str] = type_vocab_size
_a : List[Any] = type_sequence_label_size
_a : Optional[int] = initializer_range
_a : str = num_labels
_a : int = scope
_a : Tuple = range_bbox
def snake_case_ ( self : Any ) -> Any:
_a : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : str = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_a : Any = bbox[i, j, 3]
_a : Any = bbox[i, j, 1]
_a : Optional[int] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_a : int = bbox[i, j, 2]
_a : str = bbox[i, j, 0]
_a : List[Any] = t
_a : Any = None
if self.use_input_mask:
_a : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_a : Optional[Any] = None
if self.use_token_type_ids:
_a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a : Any = None
_a : Union[str, Any] = None
if self.use_labels:
_a : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a : List[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case_ ( self : str ) -> List[str]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def snake_case_ ( self : int , __snake_case : List[str] , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Any , __snake_case : int , ) -> Any:
_a : Union[str, Any] = LiltModel(config=__snake_case )
model.to(__snake_case )
model.eval()
_a : Union[str, Any] = model(__snake_case , bbox=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
_a : List[Any] = model(__snake_case , bbox=__snake_case , token_type_ids=__snake_case )
_a : List[Any] = model(__snake_case , bbox=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case_ ( self : Dict , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : int , ) -> Tuple:
_a : List[str] = self.num_labels
_a : Optional[Any] = LiltForTokenClassification(config=__snake_case )
model.to(__snake_case )
model.eval()
_a : Optional[Any] = model(
__snake_case , bbox=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self : Tuple , __snake_case : int , __snake_case : int , __snake_case : List[str] , __snake_case : List[str] , __snake_case : int , __snake_case : Optional[int] , __snake_case : Optional[int] , ) -> Optional[int]:
_a : List[str] = LiltForQuestionAnswering(config=__snake_case )
model.to(__snake_case )
model.eval()
_a : int = model(
__snake_case , bbox=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCAmelCase : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase : Optional[Any] = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase : Any = False
UpperCAmelCase : Optional[Any] = False
def snake_case_ ( self : Optional[int] , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Dict , __snake_case : Optional[int] ) -> List[str]:
return True
def snake_case_ ( self : int ) -> Dict:
_a : Union[str, Any] = LiltModelTester(self )
_a : List[str] = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def snake_case_ ( self : Dict ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def snake_case_ ( self : str ) -> Tuple:
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case_ ( self : str ) -> str:
_a : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a : Any = type
self.model_tester.create_and_check_model(*__snake_case )
def snake_case_ ( self : Optional[Any] ) -> List[Any]:
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__snake_case )
def snake_case_ ( self : List[Any] ) -> List[Any]:
_a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__snake_case )
@slow
def snake_case_ ( self : Dict ) -> Optional[Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : List[str] = LiltModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
@slow
class lowerCamelCase ( unittest.TestCase ):
def snake_case_ ( self : Optional[Any] ) -> str:
_a : str = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__snake_case )
_a : List[str] = torch.tensor([[1, 2]] , device=__snake_case )
_a : List[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__snake_case )
# forward pass
with torch.no_grad():
_a : List[Any] = model(input_ids=__snake_case , bbox=__snake_case )
_a : Optional[Any] = torch.Size([1, 2, 768] )
_a : Optional[Any] = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__snake_case , )
self.assertTrue(outputs.last_hidden_state.shape , __snake_case )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __snake_case , atol=1E-3 ) )
| 249 | 0 |
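# Vectorized sketch of the bbox legality fix-up the tester above performs with loops:
# for boxes in (x0, y0, x1, y1) form, reorder coordinates so x0 <= x1 and y0 <= y1.
# The shapes here are made up.
import torch

bbox = torch.randint(0, 1000, (2, 4, 4))
x0, y0, x1, y1 = bbox.unbind(-1)
bbox = torch.stack(
    [torch.minimum(x0, x1), torch.minimum(y0, y1), torch.maximum(x0, x1), torch.maximum(y0, y1)],
    dim=-1,
)
assert (bbox[..., 2] >= bbox[..., 0]).all() and (bbox[..., 3] >= bbox[..., 1]).all()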
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def lowerCamelCase__ ( a , a = 16 ):
__snake_case = AutoTokenizer.from_pretrained('bert-base-cased' )
__snake_case = load_dataset('glue' , 'mrpc' )
def tokenize_function(a ):
# max_length=None => use the model max length (it's actually the default)
__snake_case = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=a , max_length=a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__snake_case = datasets.map(
a , batched=a , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__snake_case = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(a ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__snake_case = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__snake_case = 16
elif accelerator.mixed_precision != "no":
__snake_case = 8
else:
__snake_case = None
return tokenizer.pad(
a , padding='longest' , max_length=a , pad_to_multiple_of=a , return_tensors='pt' , )
# Instantiate dataloaders.
__snake_case = DataLoader(
tokenized_datasets['train'] , shuffle=a , collate_fn=a , batch_size=a )
__snake_case = DataLoader(
tokenized_datasets['validation'] , shuffle=a , collate_fn=a , batch_size=a )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def lowerCamelCase__ ( a , a ):
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , a ) == "1":
__snake_case = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
__snake_case = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
__snake_case = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__snake_case = config['lr']
__snake_case = int(config['num_epochs'] )
__snake_case = int(config['seed'] )
__snake_case = int(config['batch_size'] )
set_seed(a )
__snake_case , __snake_case = get_dataloaders(a , a )
__snake_case = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
__snake_case = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__snake_case = batch_size // MAX_GPU_BATCH_SIZE
__snake_case = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__snake_case = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=a )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__snake_case = model.to(accelerator.device )
# Instantiate optimizer
__snake_case = AdamW(params=model.parameters() , lr=a )
# Instantiate scheduler
__snake_case = get_linear_schedule_with_warmup(
optimizer=a , num_warmup_steps=100 , num_training_steps=(len(a ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
a , a , a , a , a )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
__snake_case = os.path.split(a )[-1].split('.' )[0]
accelerator.init_trackers(a , a )
# Now we train the model
for epoch in range(a ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
__snake_case = 0
for step, batch in enumerate(a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__snake_case = model(**a )
__snake_case = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
__snake_case = loss / gradient_accumulation_steps
accelerator.backward(a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(a ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
__snake_case = model(**a )
__snake_case = outputs.logits.argmax(dim=-1 )
__snake_case , __snake_case = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=a , references=a , )
__snake_case = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , a )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'accuracy': eval_metric['accuracy'],
'f1': eval_metric['f1'],
'train_loss': total_loss.item() / len(a ),
'epoch': epoch,
} , step=a , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase__ ( ):
__snake_case = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=a , default=a , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=a , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , )
__snake_case = parser.parse_args()
__snake_case = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(a , a )
if __name__ == "__main__":
main()
| 356 |
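# The tracking calls from the script above in isolation: initialize trackers once, log
# scalar metrics per step/epoch, and close everything at the end. The project name and
# metric values here are made up.
from accelerate import Accelerator

accelerator = Accelerator(log_with="all", project_dir="logs")
accelerator.init_trackers("mrpc_example", config={"lr": 2e-5, "num_epochs": 3})
for epoch in range(3):
    accelerator.log({"train_loss": 1.0 / (epoch + 1), "epoch": epoch}, step=epoch)
accelerator.end_training()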
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 356 | 1 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm :
    '''simple docstring'''
    def __init__( self , claim_vector : list[int] , allocated_resources_table : list[list[int]] , maximum_claim_table : list[list[int]] , ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation( self ) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]
    def __available_resources( self ) -> list[int]:
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )
    def __need( self ) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]
    def __need_index_manager( self ) -> dict[int, list[int]]:
        return {self.__need().index(i ): i for i in self.__need()}
    def main( self , **kwargs ) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("""_""" * 50 + """\n""" )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"""Process {process_number + 1} is executing.""" )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        """Updated available resource stack for processes: """
                        + """ """.join([str(x ) for x in available_resources] ) )
                    break
if safe:
print("""The process is in a safe state.\n""" )
else:
print("""System in unsafe state. Aborting...\n""" )
break
    def __pretty_data( self ):
        print(""" """ * 9 + """Allocated Resource Table""" )
        for item in self.__allocated_resources_table:
            print(
                f"""P{self.__allocated_resources_table.index(item ) + 1}"""
                + """ """.join(f"""{it:>8}""" for it in item )
                + """\n""" )
        print(""" """ * 9 + """System Resource Table""" )
        for item in self.__maximum_claim_table:
            print(
                f"""P{self.__maximum_claim_table.index(item ) + 1}"""
                + """ """.join(f"""{it:>8}""" for it in item )
                + """\n""" )
        print(
            """Current Usage by Active Processes: """
            + """ """.join(str(x ) for x in self.__claim_vector ) )
        print(
            """Initial Available Resources: """
            + """ """.join(str(x ) for x in self.__available_resources() ) )
        time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
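# Example usage (not part of the original module): exercising the Banker's algorithm on
# the test tables defined above; `describe=True` triggers the pretty-printing branch of `main`.
def _bankers_demo():
    BankersAlgorithm(
        test_claim_vector , test_allocated_res_table , test_maximum_claim_table
    ).main(describe=True )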
| 450 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor ( ProcessorMixin ):
    '''simple docstring'''
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''ViTImageProcessor'''
    tokenizer_class = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , visual_prompt=None , images=None , return_tensors=None , **kwargs ):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("""You have to specify either text, visual prompt or images.""" )
        if text is not None and visual_prompt is not None:
            raise ValueError("""You have to specify exactly one type of prompt. Either text or visual prompt.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None and images is not None:
            encoding = {
                """pixel_values""": image_features.pixel_values,
                """conditional_pixel_values""": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                """conditional_pixel_values""": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
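# A short usage sketch (not part of the original file); the checkpoint id below is an
# assumption for illustration, any CLIP tokenizer and ViT image processor would work.
def _processor_sketch():
    from PIL import Image
    from transformers import CLIPTokenizer, ViTImageProcessor
    processor = CLIPSegProcessor(
        image_processor=ViTImageProcessor() , tokenizer=CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' ) )
    image = Image.new('RGB' , (352, 352) )
    # text + images -> token ids with `pixel_values` merged into one encoding
    return processor(text='a photo of a cat' , images=image , return_tensors='pt' )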
| 450 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass( KwargsHandler ):
    a : int = 0
    b : bool = False
    c : float = 3.0
class KwargsHandlerTester( unittest.TestCase ):
    def test_kwargs_handler( self ):
        """simple docstring"""
        self.assertDictEqual(MockClass().to_kwargs() , {} )
        self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {'a': 2, 'b': True} )
        self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
    @require_cuda
    def test_grad_scaler_kwargs( self ):
        """simple docstring"""
        scaler_handler = GradScalerKwargs(init_scale=10_24 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 1_024.0 )
        self.assertEqual(scaler._growth_factor , 2.0 )
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor , 0.5 )
        self.assertEqual(scaler._growth_interval , 20_00 )
        self.assertEqual(scaler._enabled , True )
    @require_multi_gpu
    def test_ddp_kwargs( self ):
        """simple docstring"""
        cmd = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
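# A tiny sketch (not part of the original test) of the pattern under test: any dataclass
# deriving from KwargsHandler reports only its non-default fields through `to_kwargs()`.
def _kwargs_handler_sketch():
    handler = MockClass(a=2 , c=2.25 )
    assert handler.to_kwargs() == {'a': 2, 'c': 2.25}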
| 4 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester :
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , patch_size=2 , max_length=24 , num_mel_bins=16 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , frequency_stride=2 , time_stride=2 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, input_values, labels
    def get_config( self ):
        """simple docstring"""
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def create_and_check_model( self , config , input_values , labels ):
        """simple docstring"""
        model = ASTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'input_values': input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """simple docstring"""
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = ASTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ASTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='AST does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass
    def test_model_common_attributes( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['input_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_audio():
    '''simple docstring'''
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' )
    audio , sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_feature_extractor( self ):
        """simple docstring"""
        return (
            ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
            if is_torchaudio_available()
            else None
        )
    @slow
    def test_inference_audio_classification( self ):
        """simple docstring"""
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(torch_device )
        audio , sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 5_27) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 187 | 0 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
UpperCamelCase ="\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
UpperCamelCase ="\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
UpperCamelCase ="\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union( pred_label , label , num_labels , ignore_index : bool , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ):
    """simple docstring"""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union( results , gt_seg_maps , num_labels , ignore_index : bool , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ):
    """simple docstring"""
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect , area_union , area_pred_label , area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou( results , gt_seg_maps , num_labels , ignore_index : bool , nan_to_num : Optional[int] = None , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ):
    """simple docstring"""
    total_area_intersect , total_area_union , total_area_pred_label , total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics['''mean_iou'''] = np.nanmean(iou )
    metrics['''mean_accuracy'''] = np.nanmean(acc )
    metrics['''overall_accuracy'''] = all_acc
    metrics['''per_category_iou'''] = iou
    metrics['''per_category_accuracy'''] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"""predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
"""references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
} ) , reference_urls=[
"""https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
] , )
    def _compute( self , predictions , references , num_labels , ignore_index , nan_to_num=None , label_map=None , reduce_labels=False , ):
        iou_result = mean_iou(
            results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
return iou_result
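# A small hand-checkable example (not part of the metric): one 2x2 map with two labels.
# The prediction matches ground truth in 3 of 4 pixels, so overall_accuracy is 0.75 and
# the per-category IoUs are 0.5 (label 0) and 2/3 (label 1).
def _mean_iou_sketch():
    pred = [np.array([[0, 1], [1, 1]] )]
    gt = [np.array([[0, 1], [0, 1]] )]
    return mean_iou(pred , gt , num_labels=2 , ignore_index=255 )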
| 543 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module( module ):
    """simple docstring"""
    for param in module.parameters():
        param.requires_grad = False
def get_device( ):
    """simple docstring"""
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = """mps"""
if device == "mps":
print(
"""WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
""" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
""" with generations.""" )
return device
def show_image( image ):
    """simple docstring"""
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp( ):
    """simple docstring"""
    current_time = datetime.now()
    timestamp = current_time.strftime("""%H:%M:%S""" )
    return timestamp
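# Example usage (illustrative, not from the original script) of the helpers above; the
# helper names are editorial restorations of the obfuscated originals.
def _utils_demo():
    layer = torch.nn.Linear(4 , 4 )
    freeze_module(layer )  # every parameter now has requires_grad == False
    device = get_device()  # 'cuda', 'mps' or 'cpu'
    print(f"""{get_timestamp()} - using {device}""" )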
| 543 | 1 |
"""simple docstring"""
from PIL import Image
def mean_threshold(image: Image ) ->Image:
    """simple docstring"""
    height , width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width ):
        for j in range(height ):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(width ):
        for i in range(height ):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
image.save("""output_image_path""")
| 93 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_a : str = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def _a () -> Dict:
"""simple docstring"""
__snake_case = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__snake_case = get_sagemaker_input()
else:
__snake_case = get_cluster_input()
return config
def _a (lowercase__ : Union[str, Any]=None ) -> int:
"""simple docstring"""
if subparsers is not None:
__snake_case = subparsers.add_parser('config' , description=lowercase__ )
else:
__snake_case = argparse.ArgumentParser('Accelerate config command' , description=lowercase__ )
parser.add_argument(
        '--config_file' , default=None , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
        parser.set_defaults(func=config_command )
return parser
def config_command(args ):
    """simple docstring"""
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('.json' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
print(f'accelerate configuration saved at {config_file}' )
def main():
    """simple docstring"""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
| 56 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline | 701 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 678 | 0 |
'''simple docstring'''
def bubble_sort( list_data : list , length : int = 0 ):
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i] , list_data[i + 1] = list_data[i + 1] , list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
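# A quick demonstration (not part of the original module): the recursion keeps passing a
# shorter `length` until a full pass happens with no swap.
def _bubble_sort_demo():
    assert bubble_sort([5, 1, 4, 2, 8] ) == [1, 2, 4, 5, 8]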
| 541 |
"""simple docstring"""
def greatest_common_divisor( x : int , y : int ):
    """simple docstring"""
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm( x : int , y : int ):
    """simple docstring"""
    return (x * y) // greatest_common_divisor(x , y )
def solution( n : int = 20 ):
    """simple docstring"""
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
if __name__ == "__main__":
print(f"""{solution() = }""")
| 616 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
    """simple docstring"""
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata['''model_config'''] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''module''']
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path )
    # add an entry for [MASK2]
    entity_vocab['''[MASK2]'''] = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('''<ent>''' , lstrip=False , rstrip=False )
    entity_token_2 = AddedToken('''<ent2>''' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_1, entity_token_2]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , '''tokenizer_config.json''' ) , '''r''' ) as f:
        tokenizer_config = json.load(f )
    tokenizer_config['''tokenizer_class'''] = '''MLukeTokenizer'''
    with open(os.path.join(pytorch_dump_folder_path , '''tokenizer_config.json''' ) , '''w''' ) as f:
        json.dump(tokenizer_config , f )
    with open(os.path.join(pytorch_dump_folder_path , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
        json.dump(entity_vocab , f )
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
    enta_init_index = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
    word_emb = state_dict['''embeddings.word_embeddings.weight''']
    ent_emb = word_emb[ent_init_index].unsqueeze(0 )
    enta_emb = word_emb[enta_init_index].unsqueeze(0 )
    state_dict['''embeddings.word_embeddings.weight'''] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0 )
        enta_decoder_bias = decoder_bias[enta_init_index].unsqueeze(0 )
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = F"""encoder.layer.{layer_index}.attention.self."""
            state_dict[prefix + '''w2e_''' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + '''e2w_''' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + '''e2e_''' + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['''entity_embeddings.entity_embeddings.weight''']
    entity_mask_emb = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
    state_dict['''entity_embeddings.entity_embeddings.weight'''] = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict['''entity_predictions.bias''']
    entity_mask_bias = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
    state_dict['''entity_predictions.bias'''] = torch.cat([entity_prediction_bias, entity_mask_bias] )
    model = LukeForMaskedLM(config=config ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
            state_dict_for_hugging_face['''luke.''' + key] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    missing_keys , unexpected_keys = model.load_state_dict(state_dict_for_hugging_face , strict=False )
if set(a__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(a__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='''entity_classification''' )
    text = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
    span = (0, 9)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors='''pt''' )
    outputs = model(**encoding )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
        expected_shape = torch.Size((1, 33, 768) )
        expected_slice = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    text = '''Tokyo is the capital of <mask>.'''
    span = (24, 30)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors='''pt''' )
    outputs = model(**encoding )
    input_ids = encoding['''input_ids'''][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(predicted_id )
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print('''Saving PyTorch model to {}'''.format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_original_entity_vocab( entity_vocab_path ):
    """simple docstring"""
    SPECIAL_TOKENS = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry['''id''']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_entity_name = F"""{language}:{entity_name}"""
            new_mapping[new_entity_name] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 333 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Any = {
'''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig''']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = ['''RemBertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = ['''RemBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
a__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 333 | 1 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ['text', 'image', 'audio']
def create_inputs (input_types ):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append('Text input' )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3_000 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
        else:
            raise ValueError(F"""Invalid type requested: {input_type}""" )
    return inputs
def output_types (outputs ):
    types = []
    for output in outputs:
        if isinstance(output , (str, AgentText) ):
            types.append('text' )
        elif isinstance(output , (Image.Image, AgentImage) ):
            types.append('image' )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            types.append('audio' )
        else:
            raise ValueError(F"""Invalid output: {output}""" )
    return types
@is_tool_test
class ToolTesterMixin :
    def test_inputs_outputs( self ):
        '''simple docstring'''
        self.assertTrue(hasattr(self.tool , 'inputs' ) )
        self.assertTrue(hasattr(self.tool , 'outputs' ) )
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , list ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )
    def test_call( self ):
        '''simple docstring'''
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs ) , self.tool.outputs )
    def test_common_attributes( self ):
        '''simple docstring'''
        self.assertTrue(hasattr(self.tool , 'description' ) )
        self.assertTrue(hasattr(self.tool , 'default_checkpoint' ) )
        self.assertTrue(self.tool.description.startswith('This is a tool that' ) )
    def test_agent_types_outputs( self ):
        '''simple docstring'''
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
        for output, output_type in zip(outputs , self.tool.outputs ):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output , agent_type ) )
    def test_agent_types_inputs( self ):
        '''simple docstring'''
        inputs = create_inputs(self.tool.inputs )
        _inputs = []
        for _input, input_type in zip(inputs , self.tool.inputs ):
            if isinstance(input_type , list ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        outputs = self.tool(*_inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
| 476 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __a( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_image_classification( self ):
        image_processor = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
        model = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
        model.to(torch_device )
        from datasets import load_dataset
        dataset = load_dataset('''nielsr/rvlcdip-demo''' )
        image = dataset['''train'''][0]['''image'''].convert('''RGB''' )
        inputs = image_processor(image ,return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        expected_shape = torch.Size((1, 16) )
        self.assertEqual(logits.shape ,expected_shape )
        expected_slice = torch.tensor(
            [-0.41_58, -0.40_92, -0.43_47] ,device=torch_device ,dtype=torch.float ,)
        self.assertTrue(torch.allclose(logits[0, :3] ,expected_slice ,atol=1e-4 ) ) | 30 | 0 |
from __future__ import annotations
def generate_all_combinations( n : int , k : int ):
    """simple docstring"""
    result: list[list[int]] = []
    create_all_state(1 , n , k , [] , result )
    return result
def create_all_state( increment : int , total_number : int , level : int , current_list : list[int] , total_list : list[list[int]] , ):
    """simple docstring"""
    if level == 0:
        total_list.append(current_list[:] )
        return
    for i in range(increment , total_number - level + 2 ):
        current_list.append(i )
        create_all_state(i + 1 , total_number , level - 1 , current_list , total_list )
        current_list.pop()
def print_all_state( total_list : list[list[int]] ):
    """simple docstring"""
    for i in total_list:
        print(*i )
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
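# A quick check (not part of the original script): for n=4, k=2 the generator yields all
# C(4, 2) = 6 combinations in lexicographic order.
def _combinations_check():
    assert generate_all_combinations(4 , 2 ) == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]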
| 333 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline( DiffusionPipeline ):
    """simple docstring"""
    unet: UNet2DModel
    scheduler: KarrasVeScheduler
    def __init__( self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 50 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ) ->Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape , generator=generator , device=self.device ) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat , sigma_hat = self.scheduler.add_noise_to_input(sample , sigma , generator=generator )
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output , sigma_hat , sigma_prev , sample_hat )
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
                step_output = self.scheduler.step_correct(
                    model_output , sigma_hat , sigma_prev , sample_hat , step_output.prev_sample , step_output['''derivative'''] , )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0 , 1 )
        image = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
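# A usage sketch (not part of the pipeline file). 'google/ncsnpp-celebahq-256' is a real
# unconditional checkpoint, but whether it pairs with this exact pipeline is an assumption.
def _karras_ve_sketch():
    from diffusers import KarrasVeScheduler, UNet2DModel
    unet = UNet2DModel.from_pretrained('google/ncsnpp-celebahq-256' )
    pipe = KarrasVePipeline(unet=unet , scheduler=KarrasVeScheduler() )
    return pipe(batch_size=1 , num_inference_steps=50 ).images[0]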
| 333 | 1 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments( path , n_shave_prefix_segments=1 ):
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def renew_resnet_paths( old_list , n_shave_prefix_segments=0 ):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('''in_layers.0''' , '''norm1''' )
        new_item = new_item.replace('''in_layers.2''' , '''conv1''' )
        new_item = new_item.replace('''out_layers.0''' , '''norm2''' )
        new_item = new_item.replace('''out_layers.3''' , '''conv2''' )
        new_item = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
        new_item = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
def renew_attention_paths( old_list , n_shave_prefix_segments=0 ):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
        new_item = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
        new_item = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
        new_item = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None ):
assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
old_tensor = old_checkpoint[path]
channels = old_tensor.shape[0] // 3
target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
num_heads = old_tensor.shape[0] // config['''num_head_channels'''] // 3
old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
query , key , value = old_tensor.split(channels // num_heads , dim=1 )
checkpoint[path_map['''query''']] = query.reshape(target_shape )
checkpoint[path_map['''key''']] = key.reshape(target_shape )
checkpoint[path_map['''value''']] = value.reshape(target_shape )
for path in paths:
new_path = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
new_path = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
new_path = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
new_path = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
new_path = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
checkpoint[new_path] = old_checkpoint[path['''old''']][:, :, 0]
else:
checkpoint[new_path] = old_checkpoint[path['''old''']]
def convert_ldm_checkpoint(checkpoint, config ):
new_checkpoint = {}
new_checkpoint['''time_embedding.linear_1.weight'''] = checkpoint['''time_embed.0.weight''']
new_checkpoint['''time_embedding.linear_1.bias'''] = checkpoint['''time_embed.0.bias''']
new_checkpoint['''time_embedding.linear_2.weight'''] = checkpoint['''time_embed.2.weight''']
new_checkpoint['''time_embedding.linear_2.bias'''] = checkpoint['''time_embed.2.bias''']
new_checkpoint['''conv_in.weight'''] = checkpoint['''input_blocks.0.0.weight''']
new_checkpoint['''conv_in.bias'''] = checkpoint['''input_blocks.0.0.bias''']
new_checkpoint['''conv_norm_out.weight'''] = checkpoint['''out.0.weight''']
new_checkpoint['''conv_norm_out.bias'''] = checkpoint['''out.0.bias''']
new_checkpoint['''conv_out.weight'''] = checkpoint['''out.2.weight''']
new_checkpoint['''conv_out.bias'''] = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
num_input_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
input_blocks = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(num_input_blocks )
}
# Retrieves the keys for the middle blocks only
num_middle_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
middle_blocks = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(num_middle_blocks )
}
# Retrieves the keys for the output blocks only
num_output_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
output_blocks = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(num_output_blocks )
}
for i in range(1 , num_input_blocks ):
block_id = (i - 1) // (config['''num_res_blocks'''] + 1)
layer_in_block_id = (i - 1) % (config['''num_res_blocks'''] + 1)
resnets = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
attentions = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
new_checkpoint[f"""down_blocks.{block_id}.downsamplers.0.conv.weight"""] = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
new_checkpoint[f"""down_blocks.{block_id}.downsamplers.0.conv.bias"""] = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
paths = renew_resnet_paths(resnets )
meta_path = {'''old''': f"""input_blocks.{i}.0""", '''new''': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
resnet_op = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
paths , new_checkpoint , checkpoint , additional_replacements=[meta_path, resnet_op] , config=config )
if len(attentions ):
paths = renew_attention_paths(attentions )
meta_path = {
'''old''': f"""input_blocks.{i}.1""",
'''new''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
to_split = {
f"""input_blocks.{i}.1.qkv.bias""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split , config=config , )
resnet_0 = middle_blocks[0]
attentions = middle_blocks[1]
resnet_1 = middle_blocks[2]
resnet_0_paths = renew_resnet_paths(resnet_0 )
assign_to_checkpoint(resnet_0_paths , new_checkpoint , checkpoint , config=config )
resnet_1_paths = renew_resnet_paths(resnet_1 )
assign_to_checkpoint(resnet_1_paths , new_checkpoint , checkpoint , config=config )
attentions_paths = renew_attention_paths(attentions )
to_split = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
attentions_paths , new_checkpoint , checkpoint , attention_paths_to_split=to_split , config=config )
for i in range(num_output_blocks ):
block_id = i // (config['''num_res_blocks'''] + 1)
layer_in_block_id = i % (config['''num_res_blocks'''] + 1)
output_block_layers = [shave_segments(name , 2 ) for name in output_blocks[i]]
output_block_list = {}
for layer in output_block_layers:
layer_id , layer_name = layer.split('''.''' )[0], shave_segments(layer , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(layer_name )
else:
output_block_list[layer_id] = [layer_name]
if len(output_block_list ) > 1:
resnets = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
attentions = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
resnet_0_paths = renew_resnet_paths(resnets )
paths = renew_resnet_paths(resnets )
meta_path = {'''old''': f"""output_blocks.{i}.0""", '''new''': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , config=config )
if ["conv.weight", "conv.bias"] in output_block_list.values():
index = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
new_checkpoint[f"""up_blocks.{block_id}.upsamplers.0.conv.weight"""] = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
new_checkpoint[f"""up_blocks.{block_id}.upsamplers.0.conv.bias"""] = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(attentions ) == 2:
attentions = []
if len(attentions ):
paths = renew_attention_paths(attentions )
meta_path = {
'''old''': f"""output_blocks.{i}.1""",
'''new''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
to_split = {
f"""output_blocks.{i}.1.qkv.bias""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=config , )
else:
resnet_0_paths = renew_resnet_paths(output_block_layers , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
old_path = '''.'''.join(['''output_blocks''', str(i ), path['''old''']] )
new_path = '''.'''.join(['''up_blocks''', str(block_id ), '''resnets''', str(layer_in_block_id ), path['''new''']] )
new_checkpoint[new_path] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
args = parser.parse_args()
checkpoint = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
config = json.loads(f.read())
converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
model = UNet2DModel(**config)
model.load_state_dict(converted_checkpoint)
try:
scheduler = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
vqvae = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 103 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
'''simple docstring'''
def __init__( self , text: str = None , conversation_id: uuid.UUID = None , past_user_inputs=None , generated_responses=None ) -> None:
if not conversation_id:
conversation_id = uuid.uuid4()
if past_user_inputs is None:
past_user_inputs = []
if generated_responses is None:
generated_responses = []
self.uuid = conversation_id
self.past_user_inputs = past_user_inputs
self.generated_responses = generated_responses
self.new_user_input = text
def __eq__( self , other ) -> bool:
if not isinstance(other , Conversation ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def add_user_input( self , text: str , overwrite: bool = False ) -> None:
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
f'with: "{text}".' )
self.new_user_input = text
else:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
self.new_user_input = text
def mark_processed( self ) -> None:
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
self.new_user_input = None
def append_response( self , response: str ) -> None:
self.generated_responses.append(response )
def iter_texts( self ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ) -> str:
output = f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
name = 'user' if is_user else 'bot'
output += f'{name} >> {text} \n'
return output
@add_end_docstrings(
PIPELINE_INIT_ARGS , r"\n    min_length_for_response (`int`, *optional*, defaults to 32):\n        The minimum length (in number of tokens) for a response.\n    minimum_tokens (`int`, *optional*, defaults to 10):\n        The minimum length of tokens to leave for a response.\n " , )
class ConversationalPipeline( Pipeline ):
'''simple docstring'''
def __init__( self , *args , **kwargs ) -> None:
super().__init__(*args , **kwargs )
if self.tokenizer.pad_token_id is None:
self.tokenizer.pad_token = self.tokenizer.eos_token
def _sanitize_parameters( self , min_length_for_response=None , minimum_tokens=None , clean_up_tokenization_spaces=None , **generate_kwargs ):
preprocess_params = {}
forward_params = {}
postprocess_params = {}
if min_length_for_response is not None:
preprocess_params['min_length_for_response'] = min_length_for_response
if minimum_tokens is not None:
forward_params['minimum_tokens'] = minimum_tokens
if "max_length" in generate_kwargs:
forward_params['max_length'] = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(generate_kwargs )
return preprocess_params, forward_params, postprocess_params
def __call__( self , conversations: Union[Conversation, List[Conversation]] , num_workers=0 , **kwargs ):
outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
if isinstance(outputs , list ) and len(outputs ) == 1:
return outputs[0]
return outputs
def preprocess( self , conversation: Conversation , min_length_for_response=32 ) -> Dict[str, Any]:
if not isinstance(conversation , Conversation ):
raise ValueError('ConversationalPipeline, expects Conversation as inputs' )
if conversation.new_user_input is None:
raise ValueError(
f'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
input_ids = self.tokenizer._build_conversation_input_ids(conversation )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
input_ids = self._legacy_parse_and_tokenize(conversation )
if self.framework == "pt":
input_ids = torch.LongTensor([input_ids] )
elif self.framework == "tf":
input_ids = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _forward( self , model_inputs , minimum_tokens=10 , **generate_kwargs ):
max_length = generate_kwargs.get('max_length' , self.model.config.max_length )
n = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
trim = max_length - minimum_tokens
model_inputs['input_ids'] = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
model_inputs['attention_mask'] = model_inputs['attention_mask'][:, -trim:]
conversation = model_inputs.pop('conversation' )
generate_kwargs['max_length'] = max_length
output_ids = self.model.generate(**model_inputs , **generate_kwargs )
if self.model.config.is_encoder_decoder:
start_position = 1
else:
start_position = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def postprocess( self , model_outputs , clean_up_tokenization_spaces=True ) -> Conversation:
output_ids = model_outputs['output_ids']
answer = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
conversation = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(answer )
return conversation
def _legacy_parse_and_tokenize( self , conversation: Conversation ) -> List[int]:
eos_token_id = self.tokenizer.eos_token_id
input_ids = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )
if len(input_ids ) > self.tokenizer.model_max_length:
input_ids = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 305 | 0 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase ):
@slow
def test_bert_from_pretrained( self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(model_name):
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config , BertConfig)
model = FlaxAutoModel.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertIsInstance(model , FlaxBertModel)
@slow
def test_roberta_from_pretrained( self ):
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(model_name):
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config , RobertaConfig)
model = FlaxAutoModel.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertIsInstance(model , FlaxRobertaModel)
@slow
def test_bert_jax_jit( self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = FlaxBertModel.from_pretrained(model_name)
tokens = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX)
@jax.jit
def eval(**kwargs):
return model(**kwargs)
eval(**tokens).block_until_ready()
@slow
def test_roberta_jax_jit( self ):
for model_name in ["roberta-base", "roberta-large"]:
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = FlaxRobertaModel.from_pretrained(model_name)
tokens = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX)
@jax.jit
def eval(**kwargs):
return model(**kwargs)
eval(**tokens).block_until_ready()
def test_repo_not_found( self ):
with self.assertRaisesRegex(
EnvironmentError , "bert-base is not a local folder and is not a valid model identifier"):
_ = FlaxAutoModel.from_pretrained("bert-base")
def test_revision_not_found( self ):
with self.assertRaisesRegex(
EnvironmentError , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
_ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="aaaaaa")
def test_model_file_not_found( self ):
with self.assertRaisesRegex(
EnvironmentError , "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" , ):
_ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")
def test_model_from_pt_suggestion( self ):
with self.assertRaisesRegex(EnvironmentError , "Use `from_pt=True` to load this model"):
_ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
| 182 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
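# ctypes structure mirroring the Win32 CONSOLE_CURSOR_INFO struct used by the console APIs below.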
class CursorInfo(ctypes.Structure ):
# _fields is a specific attr expected by ctypes
_fields_ = [('size', ctypes.c_int), ('visible', ctypes.c_byte)]
def hide_cursor() -> None:
"""simple docstring"""
if os.name == "nt":
ci = CursorInfo()
handle = ctypes.windll.kernel32.GetStdHandle(-11 )
ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
ci.visible = False
ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
elif os.name == "posix":
sys.stdout.write("\033[?25l" )
sys.stdout.flush()
def show_cursor() -> None:
"""simple docstring"""
if os.name == "nt":
ci = CursorInfo()
handle = ctypes.windll.kernel32.GetStdHandle(-11 )
ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
ci.visible = True
ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
elif os.name == "posix":
sys.stdout.write("\033[?25h" )
sys.stdout.flush()
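# Context manager that hides the terminal cursor for the duration of a block and restores it afterwards, even on error.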
@contextmanager
def hidden_cursor():
"""simple docstring"""
try:
hide_cursor()
yield
finally:
show_cursor()
| 182 | 1 |
"""simple docstring"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
parser = argparse.ArgumentParser()
parser.add_argument("-f" )
args = parser.parse_args()
return args.f
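# Reads the JSON metrics file that each example script writes for the given split (e.g. "eval_results.json").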
def get_results(output_dir ,split="eval" ):
path = os.path.join(output_dir ,F"""{split}_results.json""" )
if os.path.exists(path ):
with open(path ,"r" ) as f:
return json.load(f )
raise ValueError(F"""can't find {path}""" )
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus ):
def test_run_glue( self ) -> None:
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(sys , "argv" , testargs ):
run_flax_glue.main()
result = get_results(tmp_dir )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def test_run_clm( self ) -> None:
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(sys , "argv" , testargs ):
run_clm_flax.main()
result = get_results(tmp_dir )
self.assertLess(result["eval_perplexity"] , 100 )
@slow
def test_run_summarization( self ) -> None:
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(sys , "argv" , testargs ):
run_summarization_flax.main()
result = get_results(tmp_dir , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 10 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def test_run_mlm( self ) -> None:
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(sys , "argv" , testargs ):
run_mlm_flax.main()
result = get_results(tmp_dir )
self.assertLess(result["eval_perplexity"] , 42 )
@slow
def test_run_t5_mlm( self ) -> None:
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(sys , "argv" , testargs ):
run_t5_mlm_flax.main()
result = get_results(tmp_dir )
self.assertGreaterEqual(result["eval_accuracy"] , 0.4_2 )
@slow
def test_run_ner( self ) -> None:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
epochs = 7 if get_gpu_count() > 1 else 2
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(sys , "argv" , testargs ):
run_flax_ner.main()
result = get_results(tmp_dir )
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def test_run_qa( self ) -> None:
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(sys , "argv" , testargs ):
run_qa.main()
result = get_results(tmp_dir )
self.assertGreaterEqual(result["eval_f1"] , 30 )
self.assertGreaterEqual(result["eval_exact"] , 30 )
| 95 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'RUCAIBox/mvp': 1024,
}
class MvpTokenizerFast( PreTrainedTokenizerFast ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask']
slow_tokenizer_class = MvpTokenizer
def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
"""simple docstring"""
super().__init__(
vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
pre_tok_state['add_prefix_space'] = add_prefix_space
self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
self.add_prefix_space = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
tokenizer_component = 'post_processor'
tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
if tokenizer_component_instance:
state = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
state['sep'] = tuple(state['sep'] )
if "cls" in state:
state['cls'] = tuple(state['cls'] )
changes_to_apply = False
if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
state['add_prefix_space'] = add_prefix_space
changes_to_apply = True
if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
state['trim_offsets'] = trim_offsets
changes_to_apply = True
if changes_to_apply:
component_class = getattr(processors , state.pop('type' ) )
new_value = component_class(**state )
setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
def mask_token( self ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def mask_token( self , value ) -> None:
"""simple docstring"""
value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
self._mask_token = value
def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
"""simple docstring"""
is_split_into_words = kwargs.get('is_split_into_words' , False )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*args , **kwargs )
def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
"""simple docstring"""
is_split_into_words = kwargs.get('is_split_into_words' , False )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'to use it with pretokenized inputs.' )
return super()._encode_plus(*args , **kwargs )
def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
"""simple docstring"""
output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_b is None:
return output
return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
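# Note: like BART, MVP wraps a single sequence as `<s> A </s>` and a pair as `<s> A </s></s> B </s>`;
# token type ids are all zeros because the model does not use them.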
| 417 | 0 |
'''simple docstring'''
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class TranslationTool( PipelineTool ):
default_checkpoint = 'facebook/nllb-200-distilled-600M'
description = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
name = 'translator'
pre_processor_class = AutoTokenizer
model_class = AutoModelForSeq2SeqLM
lang_to_code = LANGUAGE_CODES
inputs = ['text', 'text', 'text']
outputs = ['text']
def encode( self , text , src_lang , tgt_lang ):
'''simple docstring'''
if src_lang not in self.lang_to_code:
raise ValueError(F"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(F"""{tgt_lang} is not a supported language.""" )
src_lang = self.lang_to_code[src_lang]
tgt_lang = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
text , return_tensors="pt" , src_lang=src_lang , tgt_lang=tgt_lang )
def forward( self , inputs ):
'''simple docstring'''
return self.model.generate(**inputs )
def decode( self , outputs ):
'''simple docstring'''
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
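# Usage sketch (hedged; assumes the transformers tools runtime and a downloaded NLLB checkpoint):
# translator = TranslationTool()
# print(translator("Bonjour", src_lang="French", tgt_lang="English"))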
| 711 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor( SequenceFeatureExtractor ):
model_input_names = ['audio_values', 'audio_mask']
def __init__( self , spectrogram_length=2048 , num_channels=1 , patch_size=[16, 16] , feature_size=128 , sampling_rate=44100 , hop_length_to_sampling_rate=86 , n_fft=2048 , padding_value=0.0 , **kwargs , ):
'''simple docstring'''
super().__init__(
feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
self.spectrogram_length = spectrogram_length
self.num_channels = num_channels
self.patch_size = patch_size
self.freq_len = feature_size // self.patch_size[1]
self.n_fft = n_fft
self.hop_length = sampling_rate // hop_length_to_sampling_rate
self.sampling_rate = sampling_rate
self.padding_value = padding_value
self.mel_filters = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=sampling_rate , norm="slaney" , mel_scale="slaney" , ).T
def _np_extract_fbank_features( self , waveform ):
'''simple docstring'''
log_spec = spectrogram(
waveform , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
log_spec = log_spec[:, :-1]
log_spec = log_spec - 20.0
log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
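# Converts raw mono waveforms into padded log-mel features plus an attention mask over audio patches.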
def __call__( self , raw_speech , return_tensors = None , return_attention_mask = True , sampling_rate = None , resample = False , mask_audio = False , **kwargs , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
F""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
is_batched = is_batched_numpy or (
isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
elif not is_batched and not isinstance(raw_speech , np.ndarray ):
raw_speech = np.asarray(raw_speech , dtype=np.float32 )
elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
raw_speech = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
raw_speech = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
audio_features = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , list ):
audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]
# Create audio attention mask
max_patch_len = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
audio_mask = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
audio_mask = np.array(audio_mask ).astype(np.float32 )
# convert into correct format for padding
max_time_len = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
padded_audio_features = padded_audio_features * self.padding_value
for i in range(len(audio_features ) ):
feature = audio_features[i]
padded_audio_features[i, :, : feature.shape[0], :] = feature
# return as BatchFeature
if return_attention_mask:
data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
data = {"audio_values": padded_audio_features}
encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors )
return encoded_inputs
| 38 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput( BaseOutput ):
images: Union[List[PIL.Image.Image], np.ndarray]
nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPix2PixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class FlaxStableDiffusionPipelineOutput( BaseOutput ):
images: np.ndarray
nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 109 |
from __future__ import annotations
def bucket_sort(my_list: list ) -> list:
'''simple docstring'''
if len(my_list ) == 0:
return []
min_value , max_value = min(my_list ), max(my_list )
bucket_count = int(max_value - min_value ) + 1
buckets: list[list] = [[] for _ in range(bucket_count )]
for i in my_list:
buckets[int(i - min_value )].append(i )
return [v for bucket in buckets for v in sorted(bucket )]
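# Note: runs in O(n + k) time on average, where k is the numeric range of the input; values may be ints or floats.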
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 441 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig( PretrainedConfig ):
model_type = 'roberta'
def __init__( self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig( OnnxConfig ):
@property
def inputs( self ):
if self.task == "multiple-choice":
dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
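# Usage sketch (hedged): RobertaOnnxConfig(RobertaConfig()).inputs returns the dynamic-axis
# mapping that ONNX export uses for `input_ids` and `attention_mask`.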
| 145 |
'''simple docstring'''
import argparse
import json
import subprocess
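# Queries the GitHub Actions API and fails loudly if any of the target self-hosted runners is offline.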
def get_runner_status(target_runners , token ):
'''simple docstring'''
offline_runners = []
cmd = (
f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
)
output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
o = output.stdout.decode('''utf-8''' )
status = json.loads(o )
runners = status['''runners''']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(runner )
# save the result so we can report them on Slack
with open('''offline_runners.txt''' , '''w''' ) as fp:
fp.write(json.dumps(offline_runners ) )
if len(offline_runners ) > 0:
failed = '''\n'''.join([x['''name'''] for x in offline_runners] )
raise ValueError(f'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def list_str(values ):
'''simple docstring'''
return values.split(''',''' )
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 145 | 1 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
 | 458 |
'''Index of the first Fibonacci number with n digits (Project Euler style problem).'''


def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number of the 0-started sequence (fibonacci(2) == 1)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with at least n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Index of the first Fibonacci term to contain n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
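# Added sanity checks (not part of the original solution): with this indexing,
# fibonacci(12) == 144 is the first three-digit term, so both digit helpers
# should point at index 12.
def _self_test():
    assert fibonacci(12) == 144
    assert fibonacci_digits_index(3) == 12
    assert solution(3) == 12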
| 694 | 0 |
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Clean up one table-of-content section: deduplicate entries, sort by title, keep 'Overview' first."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
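# Added demonstration (not part of the original script): clean_doc_toc on an
# invented toy section collapses the duplicate "bert" entry, sorts titles
# alphabetically, and pins "Overview" to the front.
def _demo_clean_doc_toc():
    toy = [
        {"local": "zebra", "title": "Zebra"},
        {"local": "overview", "title": "Overview"},
        {"local": "bert", "title": "BERT"},
        {"local": "bert", "title": "BERT"},
    ]
    assert [d["title"] for d in clean_doc_toc(toy)] == ["Overview", "BERT", "Zebra"]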
| 154 |
import argparse
import os
import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # Left-hand key names follow diffusers' ResnetBlock2D naming, restored from
    # the upstream consistency-model conversion script; treat them as such.
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    # Attention key names follow diffusers' attention-block naming, restored
    # from the upstream conversion script; treat them as such.
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
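# Added illustration (not part of the original script): the original UNet
# stores query/key/value as one fused 1x1-conv weight of shape (3*C, C, 1, 1);
# chunking along dim 0 and squeezing the trailing singleton dims yields the
# three (C, C) linear weights diffusers expects.
# >>> import torch
# >>> qkv = torch.randn(3 * 8, 8, 1, 1)
# >>> q, k, v = qkv.chunk(3, dim=0)
# >>> q.squeeze(-1).squeeze(-1).shape
# torch.Size([8, 8])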
def con_pt_to_diffuser(checkpoint_path, unet_config):
    # Left-hand key names are restored from the upstream consistency-model
    # conversion script (diffusers' UNet2DModel naming); read them as that
    # script's mapping rather than something recoverable from this excerpt.
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
A_ :int = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
A_ :List[Any] = parser.parse_args()
A_ :Tuple = strabool(args.class_cond)
A_ :List[str] = os.path.basename(args.unet_path)
print(f"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
A_ :Optional[int] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
A_ :List[Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
A_ :Optional[int] = TEST_UNET_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
A_ :List[str] = None
A_ :List[str] = con_pt_to_diffuser(args.unet_path, unet_config)
A_ :str = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
A_ :List[str] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
A_ :Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
A_ :Dict = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
A_ :Optional[Any] = CMStochasticIterativeScheduler(**scheduler_config)
A_ :int = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 154 | 1 |
from ..utils import DummyObject, requires_backends
# Auto-generated placeholder classes. The class names below are restored from
# diffusers' dummy flax-and-transformers objects module and should be read as
# an inference; the `from_config` / `from_pretrained` pair is the standard one.
class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
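# Added usage note (not part of the original module): `requires_backends`
# raises an ImportError naming the missing backends, so merely instantiating a
# dummy class surfaces an install hint instead of a NameError.
# >>> try:
# ...     FlaxStableDiffusionPipeline()
# ... except ImportError as e:
# ...     print("flax" in str(e) and "transformers" in str(e))
# True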
| 14 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 14 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    # Class name restored by inference from diffusers' unconditional
    # audio-generation pipeline, which this code mirrors.
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
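# Added usage sketch (not part of the original module); the checkpoint id is
# an assumption, not something recoverable from this file.
# >>> pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
# >>> output = pipe(batch_size=1, num_inference_steps=50, audio_length_in_s=4.096)
# >>> waveform = output.audios[0]  # numpy array of shape (channels, samples)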
| 701 |
"""simple docstring"""
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Clean up one table-of-content section: deduplicate entries, sort by title, keep 'Overview' first."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 192 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range,
            use_cache=False,  # literal obfuscated in this corpus row; False assumed from the upstream tester
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    # Mixin order restored by inference; the corpus row obfuscates the bases.
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
| 272 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272 | 1 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Return the printable byte-to-unicode mapping used by byte-level BPE: every
    byte gets a unicode character, with non-printable bytes shifted above 255.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
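# Added doctest-style illustration (not part of the original module):
# >>> sorted(get_pairs(("h", "e", "l", "l", "o")))
# [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]
# >>> byte_encoder = bytes_to_unicode()
# >>> "".join(byte_encoder[b] for b in " hi".encode("utf-8"))
# 'Ġhi'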
class BlenderbotTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for Blenderbot, mirroring the RoBERTa/GPT-2 scheme."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 284 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def default_onnx_opset(self):
        return 12
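# Added usage sketch (not part of the original module): instantiating the
# config with its defaults and inspecting the per-stage geometry restored above.
# >>> config = SegformerConfig()
# >>> list(zip(config.hidden_sizes, config.depths, config.sr_ratios))
# [(32, 2, 8), (64, 2, 4), (160, 2, 2), (256, 2, 1)]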
| 284 | 1 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __UpperCamelCase ( unittest.TestCase ):
    def setUp( self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
    @require_tpu
    def test_tpu( self ):
        distributed_args = F'''\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n '''.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd ,env=os.environ.copy() )
| 334 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs( self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        '''simple docstring'''
        model = TFConvBertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        '''simple docstring'''
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1) , (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1) , (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1) , (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        '''simple docstring'''
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common( self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
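# A tiny TensorFlow illustration of the multiple-choice expansion used in
# `create_and_check_for_multiple_choice` above, assuming TF is available:
# each (batch, seq) tensor is tiled to (batch, num_choices, seq) so every
# choice shares the same context.
_ids = tf.zeros((2, 5), dtype=tf.int32)
_tiled = tf.tile(tf.expand_dims(_ids, 1), (1, 4, 1))
assert _tiled.shape == (2, 4, 5)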
@require_tf
class TFConvBertModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self):
        '''simple docstring'''
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37)
    def test_config( self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[str] = True
__A : List[str] = True
if hasattr(_UpperCAmelCase , 'use_cache'):
__A : List[Any] = True
__A : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : Union[str, Any] = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
for model_class in self.all_model_classes:
__A : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)
__A : Optional[int] = model_class(_UpperCAmelCase)
__A : Optional[Any] = len(model(_UpperCAmelCase))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase)
__A : Union[str, Any] = os.path.join(_UpperCAmelCase , 'saved_model' , '1')
__A : Tuple = tf.keras.models.load_model(_UpperCAmelCase)
__A : str = model(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Optional[int] = outputs['encoder_hidden_states']
__A : str = outputs['encoder_attentions']
else:
__A : List[Any] = outputs['hidden_states']
__A : Optional[Any] = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
__A : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
    @slow
    def test_model_from_pretrained( self):
        '''simple docstring'''
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        self.assertIsNotNone(model)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = True
__A : str = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
__A : Any = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : int = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
__A : Tuple = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
def check_decoder_attentions_output(_UpperCAmelCase):
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(out_len % 2 , 0)
__A : Any = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase):
__A : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__A : Dict = True
__A : Any = False
__A : str = model_class(_UpperCAmelCase)
__A : List[str] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_decoder_attentions_output(_UpperCAmelCase)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__A : int = True
__A : Tuple = model_class(_UpperCAmelCase)
__A : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Any = True
__A : str = True
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : Union[str, Any] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase))
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_masked_lm( self):
        '''simple docstring'''
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4)
| 8 | 0 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/led-base-16384': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode() -> Optional[Any]:
    """simple docstring"""
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ) -> Union[str, Any]:
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
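# Quick sanity checks for the two helpers above: the byte table covers all
# 256 byte values, and pairs are adjacent-symbol bigrams.
assert len(bytes_to_unicode()) == 256
assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}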
class LEDTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ) -> Union[str, Any]:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False ) if isinstance(bos_token, str ) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False ) if isinstance(eos_token, str ) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False ) if isinstance(sep_token, str ) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False ) if isinstance(cls_token, str ) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False ) if isinstance(unk_token, str ) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False ) if isinstance(pad_token, str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False ) if isinstance(mask_token, str ) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size( self ) -> int:
        return len(self.encoder )
    def get_vocab( self ) -> str:
        return dict(self.encoder, **self.added_tokens_encoder )
    def bpe( self, token ) -> Dict:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair : self.bpe_ranks.get(pair, float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first, i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
    def _tokenize( self, text ) -> Any:
        bpe_tokens = []
        for token in re.findall(self.pat, text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
    def _convert_token_to_id( self, token ) -> Dict:
        return self.encoder.get(token, self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self, index ) -> Union[str, Any]:
        return self.decoder.get(index )
    def convert_tokens_to_string( self, tokens ) -> Any:
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8", errors=self.errors )
        return text
    def save_vocabulary( self, save_directory, filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file, "w", encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file, "w", encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self, token_ids_0, token_ids_1 = None, already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self, text, is_split_into_words = False, **kwargs ) -> Union[str, Any]:
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad( self, encoded_inputs, max_length = None, padding_strategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of = None, return_attention_mask = None, ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
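# A standalone sketch of the global-attention padding rule implemented above,
# assuming right padding: `-1` marks padded positions, which get neither local
# nor global attention.
def _pad_global_mask_sketch(mask: list, target_len: int) -> list:
    return mask + [-1] * (target_len - len(mask))

assert _pad_global_mask_sketch([1, 0, 0], 5) == [1, 0, 0, -1, -1]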
| 715 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params , i , prefix ) -> str:
    """simple docstring"""
    return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def tax_attention_lookup(params , i , prefix , layer_name="attention" ) -> int:
    """simple docstring"""
    k_tmp = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
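# A tiny numpy illustration of the reshape performed above, assuming the
# checkpoint stores per-layer attention kernels as (d_model, heads, head_dim):
_k_tmp = np.zeros((8, 2, 4))
_k = _k_tmp.reshape(_k_tmp.shape[0], _k_tmp.shape[1] * _k_tmp.shape[2])
assert _k.shape == (8, 8)  # the head axes are folded into a single matrix dim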
def tax_mlp_lookup(params , i , prefix , split_mlp_wi=False ) -> List[Any]:
    """simple docstring"""
    if split_mlp_wi:
        wi_0 = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
        wi_1 = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
    wo = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params , i , prefix , layer_name ) -> List[str]:
    """simple docstring"""
    return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def convert_tax_to_pytorch(variables , *, num_layers , is_encoder_only , scalable_attention = False ) -> List[Any]:
    """simple docstring"""
    old = traverse_util.flatten_dict(variables["target"] )
    old = {"/".join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:" , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old , i , "encoder" , "pre_attention_layer_norm" )
        k , o , q , v = tax_attention_lookup(old , i , "encoder" , "attention" )
        new[f'''encoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
        new[f'''encoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
        new[f'''encoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
        new[f'''encoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
        new[f'''encoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old , i , "encoder" , "pre_mlp_layer_norm" )
        wi , wo = tax_mlp_lookup(old , i , "encoder" , split_mlp_wi )
        new[f'''encoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
        if split_mlp_wi:
            new[f'''encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'''] = wi[0].T
            new[f'''encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'''] = wi[1].T
        else:
            new[f'''encoder.block.{i}.layer.1.DenseReluDense.wi.weight'''] = wi.T
        new[f'''encoder.block.{i}.layer.1.DenseReluDense.wo.weight'''] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f'''encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'''] = tax_relpos_bias_lookup(
                old , i , "encoder" ).T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old , 0 , "encoder" ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old , 0 , "decoder" ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_self_attention_layer_norm" )
            k , o , q , v = tax_attention_lookup(old , i , "decoder" , "self_attention" )
            new[f'''decoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
            new[f'''decoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
            new[f'''decoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
            new[f'''decoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
            new[f'''decoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_cross_attention_layer_norm" )
            k , o , q , v = tax_attention_lookup(old , i , "decoder" , "encoder_decoder_attention" )
            new[f'''decoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.k.weight'''] = k.T
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.o.weight'''] = o.T
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.q.weight'''] = q.T
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.v.weight'''] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_mlp_layer_norm" )
            wi , wo = tax_mlp_lookup(old , i , "decoder" , split_mlp_wi )
            new[f'''decoder.block.{i}.layer.2.layer_norm.weight'''] = layer_norm
            if split_mlp_wi:
                new[f'''decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'''] = wi[0].T
                new[f'''decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'''] = wi[1].T
            else:
                new[f'''decoder.block.{i}.layer.2.DenseReluDense.wi.weight'''] = wi.T
            new[f'''decoder.block.{i}.layer.2.DenseReluDense.wo.weight'''] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f'''decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'''] = tax_relpos_bias_lookup(old , i , "decoder" ).T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params , is_encoder_only ) -> List[Any]:
    """simple docstring"""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head." )
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ) -> Dict:
    """simple docstring"""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only = False , scalable_attention = False , ) -> List[Any]:
    """simple docstring"""
    config = MTaConfig.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config )
    else:
        model = UMTaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("Done" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
parser.add_argument(
'--scalable_attention',
action='store_true',
help='Whether the model uses scaled attention (umt5 model)',
default=False,
)
    args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
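# Hypothetical invocation of this script (file name and paths are placeholders):
#   python convert_t5x_checkpoint.py --t5x_checkpoint_path /path/to/ckpt \
#       --config_file /path/to/config.json --pytorch_dump_path ./pytorch_model \
#       --scalable_attention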
| 214 | 0 |
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def list_datasets_on_hf_gcp_parameters(with_config=True ):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class UpperCAmelCase_ ( TestCase ):
'''simple docstring'''
    dataset = None
    config_name = None
    def test_dataset_info_available( self , dataset , config_name ):
        '''simple docstring'''
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , "/" ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory ):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp" ) / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia" , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance = builder_cls(
        cache_dir=tmp_dir , config_name="20220301.frr" , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path ):
    dataset_module = dataset_module_factory("wikipedia" , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance = builder_cls(
        cache_dir=tmp_path , config_name="20220301.frr" , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds["train"] , IterableDataset )
    assert next(iter(ds["train"] ) )
| 199 |
import functools
from typing import Any
def word_break(string: str , words: list[str] ) -> bool:
    # Validation
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError('the string should be not empty string' )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError('the words should be a list of non-empty strings' )
    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = 'WORD_KEEPER'
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int ) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
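# A quick usage sketch of the function above (illustrative inputs):
assert word_break("applepie", ["apple", "pie"]) is True
assert word_break("applepie", ["apple", "pi"]) is False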
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 544 | 0 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """simple docstring"""
    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    """simple docstring"""
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum ):
    """simple docstring"""
    train = '''train'''
    dev = '''dev'''
    test = '''test'''
class TokenClassificationTask:
"""simple docstring"""
@staticmethod
def A_ ( _UpperCAmelCase : List[str], _UpperCAmelCase : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
def A_ ( _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
def A_ ( _UpperCAmelCase : List[InputExample], _UpperCAmelCase : List[str], _UpperCAmelCase : int, _UpperCAmelCase : PreTrainedTokenizer, _UpperCAmelCase : int=False, _UpperCAmelCase : Optional[int]="[CLS]", _UpperCAmelCase : Union[str, Any]=1, _UpperCAmelCase : Union[str, Any]="[SEP]", _UpperCAmelCase : int=False, _UpperCAmelCase : List[Any]=False, _UpperCAmelCase : Any=0, _UpperCAmelCase : List[str]=0, _UpperCAmelCase : int=-1_0_0, _UpperCAmelCase : List[str]=0, _UpperCAmelCase : Tuple=True, ) -> List[InputFeatures]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = {label: i for i, label in enumerate(A_ )}
SCREAMING_SNAKE_CASE__ : Tuple = []
for ex_index, example in enumerate(A_ ):
if ex_index % 1_0_0_0_0 == 0:
logger.info("Writing example %d of %d", A_, len(A_ ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for word, label in zip(example.words, example.labels ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.tokenize(A_ )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(A_ ) > 0:
tokens.extend(A_ )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(A_ ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.num_special_tokens_to_add()
if len(A_ ) > max_seq_length - special_tokens_count:
SCREAMING_SNAKE_CASE__ : Tuple = tokens[: (max_seq_length - special_tokens_count)]
SCREAMING_SNAKE_CASE__ : Tuple = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
SCREAMING_SNAKE_CASE__ : Tuple = [sequence_a_segment_id] * len(A_ )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
SCREAMING_SNAKE_CASE__ : Any = [cls_token] + tokens
SCREAMING_SNAKE_CASE__ : int = [pad_token_label_id] + label_ids
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [cls_token_segment_id] + segment_ids
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.convert_tokens_to_ids(A_ )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
SCREAMING_SNAKE_CASE__ : Tuple = [1 if mask_padding_with_zero else 0] * len(A_ )
# Zero-pad up to the sequence length.
SCREAMING_SNAKE_CASE__ : Dict = max_seq_length - len(A_ )
if pad_on_left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ([pad_token] * padding_length) + input_ids
SCREAMING_SNAKE_CASE__ : int = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ([pad_token_segment_id] * padding_length) + segment_ids
SCREAMING_SNAKE_CASE__ : List[Any] = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(A_ ) == max_seq_length
assert len(A_ ) == max_seq_length
assert len(A_ ) == max_seq_length
assert len(A_ ) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***" )
logger.info("guid: %s", example.guid )
logger.info("tokens: %s", " ".join([str(A_ ) for x in tokens] ) )
logger.info("input_ids: %s", " ".join([str(A_ ) for x in input_ids] ) )
logger.info("input_mask: %s", " ".join([str(A_ ) for x in input_mask] ) )
logger.info("segment_ids: %s", " ".join([str(A_ ) for x in segment_ids] ) )
logger.info("label_ids: %s", " ".join([str(A_ ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
features.append(
InputFeatures(
input_ids=A_, attention_mask=A_, token_type_ids=A_, label_ids=A_ ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset ):
        """simple docstring"""
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
def __init__( self : Tuple, _UpperCAmelCase : TokenClassificationTask, _UpperCAmelCase : str, _UpperCAmelCase : PreTrainedTokenizer, _UpperCAmelCase : List[str], _UpperCAmelCase : str, _UpperCAmelCase : Optional[int] = None, _UpperCAmelCase : Optional[int]=False, _UpperCAmelCase : Split = Split.train, ) -> Dict:
"""simple docstring"""
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(
A_, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(A_ ) ), )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = cached_features_file + ".lock"
with FileLock(A_ ):
if os.path.exists(A_ ) and not overwrite_cache:
logger.info(F'''Loading features from cached file {cached_features_file}''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.load(A_ )
else:
logger.info(F'''Creating features from dataset file at {data_dir}''' )
SCREAMING_SNAKE_CASE__ : Tuple = token_classification_task.read_examples_from_file(A_, A_ )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : Optional[int] = token_classification_task.convert_examples_to_features(
A_, A_, A_, A_, cls_token_at_end=bool(model_type in ["xlnet"] ), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=A_, pad_on_left=bool(tokenizer.padding_side == "left" ), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
logger.info(F'''Saving features into cached file {cached_features_file}''' )
torch.save(self.features, A_ )
def __len__( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : List[Any], _UpperCAmelCase : Optional[int] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        """simple docstring"""
        features: List[InputFeatures]
        pad_token_label_id: int = -100
def __init__( self : Any, _UpperCAmelCase : TokenClassificationTask, _UpperCAmelCase : str, _UpperCAmelCase : PreTrainedTokenizer, _UpperCAmelCase : List[str], _UpperCAmelCase : str, _UpperCAmelCase : Optional[int] = None, _UpperCAmelCase : str=False, _UpperCAmelCase : Split = Split.train, ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = token_classification_task.read_examples_from_file(A_, A_ )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : Tuple = token_classification_task.convert_examples_to_features(
A_, A_, A_, A_, cls_token_at_end=bool(model_type in ["xlnet"] ), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=A_, pad_on_left=bool(tokenizer.padding_side == "left" ), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : str = tf.data.Dataset.from_generator(
A_, ({"input_ids": tf.intaa, "attention_mask": tf.intaa}, tf.intaa), (
{"input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] )},
tf.TensorShape([None] ),
), )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.data.Dataset.from_generator(
A_, ({"input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa}, tf.intaa), (
{
"input_ids": tf.TensorShape([None] ),
"attention_mask": tf.TensorShape([None] ),
"token_type_ids": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
), )
        def get_dataset( self ) -> Any:
            """simple docstring"""
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
            return self.dataset
def __len__( self : Optional[Any] ) -> str:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Union[str, Any], _UpperCAmelCase : Optional[Any] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
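# A minimal sketch of the label-alignment rule used by the feature-conversion
# helper above: only the first word-piece of each word keeps the real label
# id; the rest get the ignore id (-100 here, matching
# nn.CrossEntropyLoss().ignore_index).
def _align_labels_sketch(word_piece_counts, label_ids, pad_token_label_id=-100):
    out = []
    for n_pieces, label in zip(word_piece_counts, label_ids):
        out.extend([label] + [pad_token_label_id] * (n_pieces - 1))
    return out

assert _align_labels_sketch([1, 3, 2], [5, 6, 7]) == [5, 6, -100, -100, 7, -100]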
| 700 |
class lowerCamelCase :
    """simple docstring"""
    def __init__( self , set_counts : list ) -> None:
        """simple docstring"""
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )
    def merge( self , src : int , dst : int ) -> bool:
        """simple docstring"""
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set , joined_set_size )
        return True
    def get_parent( self , disj_set : int ) -> int:
        """simple docstring"""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
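# A quick usage sketch of the disjoint-set class above (illustrative sizes):
# three singleton sets of sizes 1, 2 and 3; merging elements 0 and 2 joins
# their sets into one of size 4.
_ds = lowerCamelCase([1, 2, 3])
assert _ds.merge(0, 2) is True
assert _ds.max_set == 4
assert _ds.get_parent(0) == _ds.get_parent(2)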
| 157 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase =logging.get_logger(__name__)
class lowerCamelCase__ ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ['''pixel_values''']
def __init__( self ,lowerCamelCase_ = True ,lowerCamelCase_ = None ,lowerCamelCase_ = PILImageResampling.BICUBIC ,lowerCamelCase_ = True ,lowerCamelCase_ = True ,lowerCamelCase_ = 1 / 2_5_5 ,lowerCamelCase_ = None ,lowerCamelCase_ = True ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> None:
super().__init__(**lowerCamelCase_ )
A = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
A = get_size_dict(lowerCamelCase_ )
A = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
A = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ ,param_name="""crop_size""" )
A = do_resize
A = do_rescale
A = do_normalize
A = do_center_crop
A = crop_size
A = size
A = resample
A = rescale_factor
A = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
A = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = PILImageResampling.BILINEAR ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> np.ndarray:
A = get_size_dict(lowerCamelCase_ )
if "shortest_edge" in size:
A = get_resize_output_image_size(lowerCamelCase_ ,size=size["""shortest_edge"""] ,default_to_square=lowerCamelCase_ )
        #     size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
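

# --- Added example (not part of the processor class above) ---
# A minimal numpy sketch of the rescale -> normalize -> channel-first steps that
# `preprocess` chains together. `toy_preprocess` is a hypothetical helper; the
# mean/std/scale values are illustrative defaults, not this class's configuration.
import numpy as np

def toy_preprocess(image, mean=0.5, std=0.5, scale=1 / 255):
    image = image.astype(np.float32) * scale  # rescale pixel values to [0, 1]
    image = (image - mean) / std              # normalize
    return np.transpose(image, (2, 0, 1))     # HWC -> CHW (ChannelDimension.FIRST)

batch = np.stack([toy_preprocess(np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8))])
print(batch.shape)  # (1, 3, 224, 224)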
| 617 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Visual question answering pipeline: answers open-ended questions about images."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Accepts dicts, lists of dicts, generators, and datasets as-is.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 617 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
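
# --- Added note (illustration, not part of the module above) ---
# The lazy structure keeps config imports cheap even without torch/TF installed:
# from transformers import ConvNextConfig   # resolved via _LazyModule on first access
# config = ConvNextConfig()
# print(config.model_type)  # "convnext"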
| 511 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers (fairseq-style)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        d = cls()
        d.add_from_file(f)
        return d
    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0
    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
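
# --- Added sanity check (hypothetical, not in the upstream script) ---
# rewrite_dict_keys({"le@@": 5, "tt@@": 6, "er": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3})
# => {"le": 5, "tt": 6, "er</w>": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}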
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
lowercase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase__ =parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
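
# --- Added usage sketch (paths are placeholders; script filename is whatever this file is saved as) ---
# python <this_script>.py \
#     --biogpt_checkpoint_path /path/to/fairseq_biogpt_dump \
#     --pytorch_dump_folder_path /path/to/hf_biogpt_output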
| 511 | 1 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=128, max_relative_position=32, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NezhaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = (
            self.prepare_config_and_inputs()
        )
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = NezhaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
UpperCAmelCase__ = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
UpperCAmelCase__ = model(__lowercase , token_type_ids=__lowercase )
UpperCAmelCase__ = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = True
UpperCAmelCase__ = NezhaModel(__lowercase )
model.to(__lowercase )
model.eval()
UpperCAmelCase__ = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , )
UpperCAmelCase__ = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , encoder_hidden_states=__lowercase , )
UpperCAmelCase__ = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = NezhaForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
UpperCAmelCase__ = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = NezhaForNextSentencePrediction(config=__lowercase )
model.to(__lowercase )
model.eval()
UpperCAmelCase__ = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = NezhaForPreTraining(config=__lowercase )
model.to(__lowercase )
model.eval()
UpperCAmelCase__ = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , next_sentence_label=__lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> str:
"""simple docstring"""
UpperCAmelCase__ = NezhaForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
UpperCAmelCase__ = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = NezhaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
UpperCAmelCase__ = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = NezhaForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
UpperCAmelCase__ = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_choices
UpperCAmelCase__ = NezhaForMultipleChoice(config=__lowercase )
model.to(__lowercase )
model.eval()
UpperCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__lowercase )
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
        config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask = (
            self.model_tester.prepare_config_and_inputs_for_decoder()
        )
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, token_type_ids, input_mask, sequence_labels,
            token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
        )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowercase )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowercase )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*__lowercase )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowercase )
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowercase )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowercase )
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowercase )
@slow
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
UpperCAmelCase__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase__ = model(__lowercase , attention_mask=__lowercase )[0]
UpperCAmelCase__ = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , __lowercase )
UpperCAmelCase__ = torch.tensor([[[0.06_85, 0.24_41, 0.11_02], [0.06_00, 0.19_06, 0.13_49], [0.02_21, 0.08_19, 0.05_86]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowercase , atol=1E-4 ) )
@slow
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
UpperCAmelCase__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase__ = model(__lowercase , attention_mask=__lowercase )[0]
UpperCAmelCase__ = torch.Size((1, 6, 21128) )
self.assertEqual(output.shape , __lowercase )
UpperCAmelCase__ = torch.tensor(
[[-2.79_39, -1.79_02, -2.21_89], [-2.85_85, -1.89_08, -2.37_23], [-2.64_99, -1.77_50, -2.25_58]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowercase , atol=1E-4 ) )
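
# --- Added example (not part of the test suite above) ---
# Hedged inference sketch using the checkpoint the integration tests load:
# from transformers import AutoTokenizer, NezhaModel
# import torch
#
# tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base")
# model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
# inputs = tokenizer("NEZHA is a Chinese language model.", return_tensors="pt")
# with torch.no_grad():
#     outputs = model(**inputs)
# print(outputs.last_hidden_state.shape)  # torch.Size([1, seq_len, 768])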
| 146 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
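
# --- Added example (illustration, not part of the module above) ---
# "facebook/xglm-564M" is the smallest public XGLM checkpoint; network access assumed.
# from transformers import XGLMTokenizer, XGLMForCausalLM
#
# tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
# model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M")
# out = model.generate(**tokenizer("The capital of France is", return_tensors="pt"), max_new_tokens=5)
# print(tokenizer.decode(out[0], skip_special_tokens=True))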
| 63 | 0 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("""Googling.....""")
__magic_name__ : List[Any] = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
__magic_name__ : Optional[int] = requests.get(url, headers={"""UserAgent""": UserAgent().random})
# res.raise_for_status()
with open("""project1a.html""", """wb""") as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
__magic_name__ : Optional[int] = BeautifulSoup(res.text, """html.parser""")
__magic_name__ : Tuple = list(soup.select(""".eZt8xd"""))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("""href"""))
else:
webbrowser.open(F'https://google.com{link.get("href")}')
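
# --- Added hedge (not in the original script) ---
# The ".eZt8xd" selector targets Google's obfuscated, frequently-changing markup;
# a defensive fallback when it matches nothing could be:
# links = links or soup.find_all("a", href=True)[:5]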
| 721 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        # Computes the height/width the processor should produce when do_resize is True.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
def UpperCamelCase( self ):
pass
def UpperCamelCase( self ):
# Initialize image_processing
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
_snake_case = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase( self ):
# Initialize image_processing
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
_snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase( self ):
# Initialize image_processing
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
_snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCamelCase( self ):
# prepare image and target
_snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
_snake_case = json.loads(f.read() )
_snake_case = {"image_id": 39_769, "annotations": target}
# encode them
_snake_case = DetaImageProcessor()
_snake_case = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
# verify pixel values
_snake_case = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
_snake_case = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1e-4 ) )
# verify area
_snake_case = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
_snake_case = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
_snake_case = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1e-3 ) )
# verify image_id
_snake_case = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
_snake_case = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
_snake_case = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify orig_size
_snake_case = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
_snake_case = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
@slow
def UpperCamelCase( self ):
# prepare image, target and masks_path
_snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
_snake_case = json.loads(f.read() )
_snake_case = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
_snake_case = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
_snake_case = DetaImageProcessor(format="coco_panoptic" )
_snake_case = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
# verify pixel values
_snake_case = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
_snake_case = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1e-4 ) )
# verify area
_snake_case = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
_snake_case = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
_snake_case = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1e-3 ) )
# verify image_id
_snake_case = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
_snake_case = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
_snake_case = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify masks
_snake_case = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
# verify orig_size
_snake_case = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
_snake_case = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
| 368 | 0 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        vocab_tokens = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )
    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
lowercase__ = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 325 |
def reverse_long_words(sentence: str) -> str:
    """Reverse each word that is longer than 4 characters.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join("".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 154 | 0 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, backbone_featmap_shape=[1, 16, 4, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [4, 8, 16, 32],
"""num_groups""": 2,
}
        return ViTHybridConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=backbone_config)
def A__ ( self: str ,lowerCamelCase_: Any ,lowerCamelCase_: int ,lowerCamelCase_: Optional[Any] ) -> Any:
UpperCAmelCase_ : Optional[int] = ViTHybridModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: str ,lowerCamelCase_: List[Any] ) -> Any:
UpperCAmelCase_ : Any = self.type_sequence_label_size
UpperCAmelCase_ : Optional[Any] = ViTHybridForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(lowerCamelCase_ ,labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)
def A__ ( self: str ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def A__ ( self: Union[str, Any] ) -> Union[str, Any]:
pass
def A__ ( self: List[Any] ) -> List[str]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[int] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: Tuple ) -> Optional[Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : int = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase_ : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
def A__ ( self: Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def A__ ( self: Tuple ) -> List[Any]:
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def A__ ( self: int ) -> Dict:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@slow
def A__ ( self: List[str] ) -> Tuple:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A__ ( self: Any ) -> Any:
UpperCAmelCase_ : Optional[Any] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowerCamelCase_ )
UpperCAmelCase_ : int = self.default_image_processor
UpperCAmelCase_ : Union[str, Any] = prepare_img()
UpperCAmelCase_ : Optional[int] = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ )
# verify the logits
UpperCAmelCase_ : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase_ ,atol=1e-4 ) )
@slow
@require_accelerate
def A__ ( self: Optional[int] ) -> Any:
UpperCAmelCase_ : List[str] = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" )
UpperCAmelCase_ : Dict = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" ,device_map="""auto""" )
UpperCAmelCase_ : Any = prepare_img()
UpperCAmelCase_ : List[Any] = image_processor(images=lowerCamelCase_ ,return_tensors="""pt""" )
UpperCAmelCase_ : List[Any] = model(**lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = outputs.logits
# model predicts one of the 1000 ImageNet classes
UpperCAmelCase_ : Union[str, Any] = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
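
# --- Added example (not part of the test suite above) ---
# Hedged single-image inference sketch with the checkpoint used in the slow tests:
# from PIL import Image
# from transformers import ViTHybridImageProcessor, ViTHybridForImageClassification
#
# processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
# model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
# inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
# logits = model(**inputs).logits
# print(model.config.id2label[logits.argmax(-1).item()])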
| 322 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 322 | 1 |
'''simple docstring'''
# Identifier names below are reconstructed from the surviving references in this
# dump (the demo code calls Node, MinHeap, decrease_key, and reads my_min_heap.heap).
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
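
# Illustrative note (not part of the original file): build_heap is O(n) because
# it sifts down from the last parent index; insert, remove and decrease_key are
# each O(log n), and idx_of_element is what lets decrease_key locate a node
# without a linear scan.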
A_ = Node("R", -1)
A_ = Node("B", 6)
A_ = Node("A", 3)
A_ = Node("X", 1)
A_ = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
A_ = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 143 |
"""simple docstring"""
# Identifier names below are reconstructed from the surviving references in this
# dump (the bodies call Node, LinkedListIterator, get_data, set_head, ...).
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position: int, value) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception('Node not found')

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    """
    A minimal usage sketch (name reconstructed; the dump kept only an empty
    stub here, so this doctest is illustrative rather than the original one).

    >>> linked_list = LinkedList()
    >>> linked_list.is_empty()
    True
    >>> linked_list.insert(10)
    >>> linked_list.insert(20)
    >>> linked_list.get_head_data()
    10
    >>> linked_list.get_tail_data()
    20
    >>> 10 in linked_list
    True
    >>> print(linked_list)
    10 20
    """
if __name__ == "__main__":
import doctest
doctest.testmod()
| 621 | 0 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __magic_name__ ( unittest.TestCase ):
UpperCAmelCase =MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
UpperCAmelCase =TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def lowerCAmelCase ( self , snake_case , snake_case , snake_case) -> Any:
'''simple docstring'''
_UpperCAmelCase : str =AudioClassificationPipeline(model=_lowerCamelCase , feature_extractor=_lowerCamelCase)
# test with a raw waveform
_UpperCAmelCase : str =np.zeros((3_4_0_0_0,))
_UpperCAmelCase : Dict =np.zeros((1_4_0_0_0,))
return audio_classifier, [audioa, audio]
def lowerCAmelCase ( self , snake_case , snake_case) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : List[str] =examples
_UpperCAmelCase : List[str] =audio_classifier(_lowerCamelCase)
# by default a model is initialized with num_labels=2
self.assertEqual(
_lowerCamelCase , [
{'score': ANY(_lowerCamelCase), 'label': ANY(_lowerCamelCase)},
{'score': ANY(_lowerCamelCase), 'label': ANY(_lowerCamelCase)},
] , )
_UpperCAmelCase : Dict =audio_classifier(_lowerCamelCase , top_k=1)
self.assertEqual(
_lowerCamelCase , [
{'score': ANY(_lowerCamelCase), 'label': ANY(_lowerCamelCase)},
] , )
self.run_torchaudio(_lowerCamelCase)
@require_torchaudio
def lowerCAmelCase ( self , snake_case) -> Optional[Any]:
'''simple docstring'''
import datasets
# test with a local file
_UpperCAmelCase : List[Any] =datasets.load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation')
_UpperCAmelCase : Optional[Any] =dataset[0]['audio']['array']
_UpperCAmelCase : Tuple =audio_classifier(_lowerCamelCase)
self.assertEqual(
_lowerCamelCase , [
{'score': ANY(_lowerCamelCase), 'label': ANY(_lowerCamelCase)},
{'score': ANY(_lowerCamelCase), 'label': ANY(_lowerCamelCase)},
] , )
@require_torch
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] ='anton-l/wav2vec2-random-tiny-classifier'
_UpperCAmelCase : List[str] =pipeline('audio-classification' , model=_lowerCamelCase)
_UpperCAmelCase : Any =np.ones((8_0_0_0,))
_UpperCAmelCase : str =audio_classifier(_lowerCamelCase , top_k=4)
_UpperCAmelCase : Tuple =[
{'score': 0.08_42, 'label': 'no'},
{'score': 0.08_38, 'label': 'up'},
{'score': 0.08_37, 'label': 'go'},
{'score': 0.08_34, 'label': 'right'},
]
_UpperCAmelCase : List[Any] =[
{'score': 0.08_45, 'label': 'stop'},
{'score': 0.08_44, 'label': 'on'},
{'score': 0.08_41, 'label': 'right'},
{'score': 0.08_34, 'label': 'left'},
]
self.assertIn(nested_simplify(_lowerCamelCase , decimals=4) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
_UpperCAmelCase : Any ={'array': np.ones((8_0_0_0,)), 'sampling_rate': audio_classifier.feature_extractor.sampling_rate}
_UpperCAmelCase : Optional[int] =audio_classifier(_lowerCamelCase , top_k=4)
self.assertIn(nested_simplify(_lowerCamelCase , decimals=4) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
@require_torch
@slow
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
import datasets
_UpperCAmelCase : Optional[Any] ='superb/wav2vec2-base-superb-ks'
_UpperCAmelCase : Optional[int] =pipeline('audio-classification' , model=_lowerCamelCase)
_UpperCAmelCase : Any =datasets.load_dataset('anton-l/superb_dummy' , 'ks' , split='test')
 _UpperCAmelCase : int =np.array(dataset[3]['speech'] , dtype=np.float32)
_UpperCAmelCase : str =audio_classifier(_lowerCamelCase , top_k=4)
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=3) , [
{'score': 0.9_81, 'label': 'go'},
{'score': 0.0_07, 'label': 'up'},
{'score': 0.0_06, 'label': '_unknown_'},
{'score': 0.0_01, 'label': 'down'},
] , )
@require_tf
@unittest.skip('Audio classification is not implemented for TF')
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
pass
| 712 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __magic_name__ ( unittest.TestCase ):
def __init__( self , snake_case , snake_case=7 , snake_case=3 , snake_case=1_8 , snake_case=3_0 , snake_case=4_0_0 , snake_case=True , snake_case=None , snake_case=True , ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =size if size is not None else {'height': 1_8, 'width': 1_8}
_UpperCAmelCase : Optional[Any] =parent
_UpperCAmelCase : Any =batch_size
_UpperCAmelCase : Union[str, Any] =num_channels
_UpperCAmelCase : List[str] =image_size
_UpperCAmelCase : List[Any] =min_resolution
_UpperCAmelCase : Optional[Any] =max_resolution
_UpperCAmelCase : List[Any] =do_resize
_UpperCAmelCase : Union[str, Any] =size
_UpperCAmelCase : List[Any] =apply_ocr
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __magic_name__ ( lowerCAmelCase ,unittest.TestCase ):
UpperCAmelCase =LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : int =LayoutLMvaImageProcessingTester(self)
@property
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCAmelCase : List[Any] =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(snake_case , 'do_resize'))
self.assertTrue(hasattr(snake_case , 'size'))
self.assertTrue(hasattr(snake_case , 'apply_ocr'))
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : int =self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'height': 1_8, 'width': 1_8})
_UpperCAmelCase : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2})
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
# Initialize image_processing
_UpperCAmelCase : Optional[int] =self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_UpperCAmelCase : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case)
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image)
# Test not batched input
_UpperCAmelCase : Union[str, Any] =image_processing(image_inputs[0] , return_tensors='pt')
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , snake_case)
self.assertIsInstance(encoding.boxes , snake_case)
# Test batched
_UpperCAmelCase : str =image_processing(snake_case , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
# Initialize image_processing
_UpperCAmelCase : Union[str, Any] =self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_UpperCAmelCase : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case)
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray)
# Test not batched input
_UpperCAmelCase : List[str] =image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase : str =image_processing(snake_case , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
# Initialize image_processing
_UpperCAmelCase : Optional[int] =self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_UpperCAmelCase : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case)
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor)
# Test not batched input
_UpperCAmelCase : Optional[Any] =image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase : List[Any] =image_processing(snake_case , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
# with apply_OCR = True
_UpperCAmelCase : str =LayoutLMvaImageProcessor()
from datasets import load_dataset
_UpperCAmelCase : Tuple =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test')
_UpperCAmelCase : Tuple =Image.open(ds[0]['file']).convert('RGB')
_UpperCAmelCase : Any =image_processing(snake_case , return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
self.assertEqual(len(encoding.words) , len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_UpperCAmelCase : Dict =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
_UpperCAmelCase : Optional[int] =[[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 
5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , snake_case)
self.assertListEqual(encoding.boxes , snake_case)
# with apply_OCR = False
_UpperCAmelCase : Dict =LayoutLMvaImageProcessor(apply_ocr=snake_case)
_UpperCAmelCase : Optional[int] =image_processing(snake_case , return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
| 331 | 0 |
'''simple docstring'''
_A: List[str] = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 126 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A: List[str] = logging.get_logger(__name__)
_A: Optional[Any] = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class UpperCAmelCase ( UpperCAmelCase_ ):
_A : List[Any] = """instructblip_vision_model"""
def __init__( self , __A=1_408 , __A=6_144 , __A=39 , __A=16 , __A=224 , __A=14 , __A="gelu" , __A=1E-6 , __A=0.0 , __A=1E-10 , __A=True , **__A , ):
super().__init__(**__A )
__UpperCAmelCase = hidden_size
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = patch_size
__UpperCAmelCase = image_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = attention_dropout
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = hidden_act
__UpperCAmelCase = qkv_bias
@classmethod
def __lowerCamelCase ( cls , __A , **__A ):
cls._set_token_in_kwargs(__A )
__UpperCAmelCase , __UpperCAmelCase = cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__UpperCAmelCase = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__A , **__A )
class UpperCAmelCase ( UpperCAmelCase_ ):
_A : Optional[Any] = """instructblip_qformer"""
def __init__( self , __A=30_522 , __A=768 , __A=12 , __A=12 , __A=3_072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.0_2 , __A=1E-12 , __A=0 , __A="absolute" , __A=2 , __A=1_408 , **__A , ):
super().__init__(pad_token_id=__A , **__A )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = cross_attention_frequency
__UpperCAmelCase = encoder_hidden_size
@classmethod
def __lowerCamelCase ( cls , __A , **__A ):
cls._set_token_in_kwargs(__A )
__UpperCAmelCase , __UpperCAmelCase = cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__UpperCAmelCase = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__A , **__A )
class UpperCAmelCase ( UpperCAmelCase_ ):
_A : Dict = """instructblip"""
_A : List[Any] = True
def __init__( self , __A=None , __A=None , __A=None , __A=32 , **__A ):
super().__init__(**__A )
if vision_config is None:
__UpperCAmelCase = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__UpperCAmelCase = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__UpperCAmelCase = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__UpperCAmelCase = InstructBlipVisionConfig(**__A )
__UpperCAmelCase = InstructBlipQFormerConfig(**__A )
__UpperCAmelCase = text_config['model_type'] if 'model_type' in text_config else 'opt'
__UpperCAmelCase = CONFIG_MAPPING[text_model_type](**__A )
__UpperCAmelCase = self.text_config.tie_word_embeddings
__UpperCAmelCase = self.text_config.is_encoder_decoder
__UpperCAmelCase = num_query_tokens
__UpperCAmelCase = self.vision_config.hidden_size
__UpperCAmelCase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__UpperCAmelCase = 1.0
__UpperCAmelCase = 0.0_2
@classmethod
def __lowerCamelCase ( cls , __A , __A , __A , **__A , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def __lowerCamelCase ( self ):
__UpperCAmelCase = copy.deepcopy(self.__dict__ )
__UpperCAmelCase = self.vision_config.to_dict()
__UpperCAmelCase = self.qformer_config.to_dict()
__UpperCAmelCase = self.text_config.to_dict()
__UpperCAmelCase = self.__class__.model_type
return output
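# Illustrative usage sketch (not part of the original file; mirrors the standard
# Hugging Face config API, values made up):
#
#     vision_cfg = InstructBlipVisionConfig(hidden_size=1_408)
#     qformer_cfg = InstructBlipQFormerConfig(vocab_size=30_522)
#     cfg = InstructBlipConfig(
#         vision_config=vision_cfg.to_dict(), qformer_config=qformer_cfg.to_dict()
#     )
#     assert cfg.to_dict()["vision_config"]["hidden_size"] == 1_408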
| 126 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__magic_name__ = {
"""configuration_encodec""": [
"""ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EncodecConfig""",
],
"""feature_extraction_encodec""": ["""EncodecFeatureExtractor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"""ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EncodecModel""",
"""EncodecPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
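# Illustrative note (assumption, not part of the original file): the _LazyModule
# object stands in for this module, so names like EncodecModel are resolved on
# first attribute access rather than importing torch-dependent code eagerly.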
| 258 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__magic_name__ = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 258 | 1 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
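# Illustrative sketch (not part of the original script, using the imports above):
# StratifiedKFold only yields index arrays; each fold pairs a label-balanced
# validation slice with the remaining rows for training, e.g.:
#
#     labels = np.array([0, 0, 0, 1, 1, 1])
#     for train_idxs, valid_idxs in StratifiedKFold(n_splits=3).split(
#         np.zeros(len(labels)), labels
#     ):
#         print(train_idxs, valid_idxs)  # disjoint, class-balanced index arrays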
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : DatasetDict , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : int = 1_6 ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoTokenizer.from_pretrained("bert-base-cased" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = DatasetDict(
{
"train": dataset["train"].select(SCREAMING_SNAKE_CASE_ ),
"validation": dataset["train"].select(SCREAMING_SNAKE_CASE_ ),
"test": dataset["validation"],
} )
def tokenize_function(SCREAMING_SNAKE_CASE_ : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE_ : int = datasets.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(SCREAMING_SNAKE_CASE_ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE_ : List[Any] = 1_6
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE_ : List[str] = 8
else:
SCREAMING_SNAKE_CASE_ : List[str] = None
return tokenizer.pad(
SCREAMING_SNAKE_CASE_ , padding="longest" , max_length=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_tensors="pt" , )
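        # Illustrative note (not part of the original script): with
        # pad_to_multiple_of=8, a batch whose longest sequence has 27 tokens is
        # padded to 32 (the next multiple of 8), keeping tensor shapes friendly
        # to fp16/bf16 tensor cores.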
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DataLoader(
tokenized_datasets["train"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoader(
tokenized_datasets["validation"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoader(
tokenized_datasets["test"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
return train_dataloader, eval_dataloader, test_dataloader
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = []
# Download the dataset
SCREAMING_SNAKE_CASE_ : List[Any] = load_dataset("glue" , "mrpc" )
# Create our splits
SCREAMING_SNAKE_CASE_ : List[Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
SCREAMING_SNAKE_CASE_ : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE_ : Any = config["lr"]
SCREAMING_SNAKE_CASE_ : Dict = int(config["num_epochs"] )
SCREAMING_SNAKE_CASE_ : int = int(config["seed"] )
SCREAMING_SNAKE_CASE_ : str = int(config["batch_size"] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE_ : Tuple = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE_ : Any = batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE_ : Union[str, Any] = MAX_GPU_BATCH_SIZE
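    # Illustrative note (not part of the original script): e.g. batch_size=64 with
    # MAX_GPU_BATCH_SIZE=16 gives gradient_accumulation_steps=4, so each optimizer
    # update still reflects 16 * 4 = 64 examples.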
set_seed(SCREAMING_SNAKE_CASE_ )
# New Code #
# Create our folds:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = kfold.split(np.zeros(datasets["train"].num_rows ) , datasets["train"]["label"] )
SCREAMING_SNAKE_CASE_ : Optional[int] = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Tuple = get_fold_dataloaders(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE_ : List[Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=SCREAMING_SNAKE_CASE_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE_ : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE_ : int = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ )
# Instantiate scheduler
SCREAMING_SNAKE_CASE_ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=1_0_0 , num_training_steps=(len(SCREAMING_SNAKE_CASE_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE_ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE_ : Optional[int] = model(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Dict = outputs.loss
SCREAMING_SNAKE_CASE_ : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Any = model(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Dict = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE_ : Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , SCREAMING_SNAKE_CASE_ )
# New Code #
# We also run predictions on the test set at the very end
SCREAMING_SNAKE_CASE_ : Dict = []
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Any = model(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : List[str] = outputs.logits
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Any = accelerator.gather_for_metrics((predictions, batch["labels"]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 )
SCREAMING_SNAKE_CASE_ : str = torch.stack(SCREAMING_SNAKE_CASE_ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
SCREAMING_SNAKE_CASE_ : List[str] = metric.compute(predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ )
accelerator.print("Average test metrics from all folds:" , SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
# New Code #
parser.add_argument("--num_folds" , type=SCREAMING_SNAKE_CASE_ , default=3 , help="The number of splits to perform across the dataset" )
SCREAMING_SNAKE_CASE_ : str = parser.parse_args()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"lr": 2E-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 421 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    """Split the fetched dataset dict into (features, target)."""
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """Fit an XGBoost regressor and predict the test split.

    (Identifier names reconstructed from the surviving references in this dump.)
    """
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    """Fetch California housing data, train, predict, and report errors."""
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 421 | 1 |
'''simple docstring'''
# Identifier names below are reconstructed from the surviving references in this
# dump; the algorithm is Gaussian-elimination-style reduction plus back substitution.
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError('solve_simultaneous() requires lists of integers')
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError('solve_simultaneous() requires at least 1 full equation')
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
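# Illustrative check (not part of the original file): [[1, 2, 3], [2, 1, 3]]
# encodes x + 2y = 3 and 2x + y = 3; solve_simultaneous returns [1.0, 1.0].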
if __name__ == "__main__":
import doctest
doctest.testmod()
 eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 432 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Shannon entropy of softmax(x) along dim 1 (names reconstructed from the callers)."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
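# Derivation sketch (not part of the original file): with p_i = exp(x_i) / A
# and A = sum_j exp(x_j), the Shannon entropy is
#     -sum_i p_i * log(p_i) = -sum_i p_i * (x_i - log(A))
#                           = log(A) - (sum_i x_i * exp(x_i)) / A
#                           = log(A) - B / A,
# which is exactly what this function returns.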
class SCREAMING_SNAKE_CASE__ ( nn.Module):
def __init__( self , A_ )-> List[Any]:
'''simple docstring'''
super().__init__()
UpperCamelCase = config.output_attentions
UpperCamelCase = config.output_hidden_states
UpperCamelCase = nn.ModuleList([BertLayer(A_ ) for _ in range(config.num_hidden_layers )] )
UpperCamelCase = nn.ModuleList([BertHighway(A_ ) for _ in range(config.num_hidden_layers )] )
UpperCamelCase = [-1 for _ in range(config.num_hidden_layers )]
def UpperCAmelCase_ ( self , A_ )-> str:
'''simple docstring'''
if (type(A_ ) is float) or (type(A_ ) is int):
for i in range(len(self.early_exit_entropy ) ):
UpperCamelCase = x
else:
UpperCamelCase = x
def UpperCAmelCase_ ( self , A_ )-> Dict:
'''simple docstring'''
UpperCamelCase = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def UpperCAmelCase_ ( self , A_ , A_=None , A_=None , A_=None , A_=None , )-> Tuple:
'''simple docstring'''
UpperCamelCase = ()
UpperCamelCase = ()
UpperCamelCase = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
UpperCamelCase = all_hidden_states + (hidden_states,)
UpperCamelCase = layer_module(
A_ , A_ , head_mask[i] , A_ , A_ )
UpperCamelCase = layer_outputs[0]
if self.output_attentions:
UpperCamelCase = all_attentions + (layer_outputs[1],)
UpperCamelCase = (hidden_states,)
if self.output_hidden_states:
UpperCamelCase = current_outputs + (all_hidden_states,)
if self.output_attentions:
UpperCamelCase = current_outputs + (all_attentions,)
UpperCamelCase = self.highway[i](A_ )
# logits, pooled_output
if not self.training:
UpperCamelCase = highway_exit[0]
UpperCamelCase = entropy(A_ )
UpperCamelCase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
UpperCamelCase = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
UpperCamelCase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(A_ , i + 1 )
else:
UpperCamelCase = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
UpperCamelCase = all_hidden_states + (hidden_states,)
UpperCamelCase = (hidden_states,)
if self.output_hidden_states:
UpperCamelCase = outputs + (all_hidden_states,)
if self.output_attentions:
UpperCamelCase = outputs + (all_attentions,)
UpperCamelCase = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
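# Illustrative note (not part of the original file): at inference time each
# highway head scores its own logits with entropy(); the first layer whose
# entropy drops below its early_exit_entropy threshold raises HighwayException,
# skipping all remaining transformer layers (early exit).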
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , snake_case_ , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ )-> Dict:
'''simple docstring'''
super().__init__(A_ )
UpperCamelCase = config
UpperCamelCase = BertEmbeddings(A_ )
UpperCamelCase = DeeBertEncoder(A_ )
UpperCamelCase = BertPooler(A_ )
self.init_weights()
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
return self.embeddings.word_embeddings
def UpperCAmelCase_ ( self , A_ )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = value
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(A_ )
@add_start_docstrings_to_model_forward(A_ )
def UpperCAmelCase_ ( self , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , )-> List[Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
UpperCamelCase = input_ids.size()
elif inputs_embeds is not None:
UpperCamelCase = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
UpperCamelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
UpperCamelCase = torch.ones(A_ , device=A_ )
if encoder_attention_mask is None:
UpperCamelCase = torch.ones(A_ , device=A_ )
if token_type_ids is None:
UpperCamelCase = torch.zeros(A_ , dtype=torch.long , device=A_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
UpperCamelCase = self.get_extended_attention_mask(A_ , A_ , A_ )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
UpperCamelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
UpperCamelCase = encoder_attention_mask[:, None, None, :]
UpperCamelCase = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
UpperCamelCase = (1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
UpperCamelCase = self.get_head_mask(A_ , self.config.num_hidden_layers )
UpperCamelCase = self.embeddings(
input_ids=A_ , position_ids=A_ , token_type_ids=A_ , inputs_embeds=A_ )
UpperCamelCase = self.encoder(
A_ , attention_mask=A_ , head_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , )
UpperCamelCase = encoder_outputs[0]
UpperCamelCase = self.pooler(A_ )
UpperCamelCase = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = message
UpperCamelCase = exit_layer # start from 1!
class SCREAMING_SNAKE_CASE__ ( nn.Module):
def __init__( self , A_ )-> Dict:
'''simple docstring'''
super().__init__()
UpperCamelCase = BertPooler(A_ )
UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
UpperCamelCase = nn.Linear(config.hidden_size , config.num_labels )
def UpperCAmelCase_ ( self , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = encoder_outputs[0]
UpperCamelCase = self.pooler(A_ )
# "return" pooler_output
# BertModel
UpperCamelCase = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
UpperCamelCase = bmodel_output[1]
UpperCamelCase = self.dropout(A_ )
UpperCamelCase = self.classifier(A_ )
return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """ , snake_case_ , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ )-> Tuple:
'''simple docstring'''
super().__init__(A_ )
UpperCamelCase = config.num_labels
UpperCamelCase = config.num_hidden_layers
UpperCamelCase = DeeBertModel(A_ )
UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
UpperCamelCase = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(A_ )
def UpperCAmelCase_ ( self , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=-1 , A_=False , )-> Tuple:
'''simple docstring'''
UpperCamelCase = self.num_layers
try:
UpperCamelCase = self.bert(
A_ , attention_mask=A_ , token_type_ids=A_ , position_ids=A_ , head_mask=A_ , inputs_embeds=A_ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
UpperCamelCase = outputs[1]
UpperCamelCase = self.dropout(A_ )
UpperCamelCase = self.classifier(A_ )
UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
UpperCamelCase = e.message
UpperCamelCase = e.exit_layer
UpperCamelCase = outputs[0]
if not self.training:
UpperCamelCase = entropy(A_ )
UpperCamelCase = []
UpperCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
UpperCamelCase = MSELoss()
UpperCamelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCamelCase = CrossEntropyLoss()
UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
UpperCamelCase = []
for highway_exit in outputs[-1]:
UpperCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(A_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
UpperCamelCase = MSELoss()
UpperCamelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCamelCase = CrossEntropyLoss()
UpperCamelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(A_ )
if train_highway:
UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
UpperCamelCase = (loss,) + outputs
if not self.training:
UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
UpperCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
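# Added sketch: DeeBERT's early exits fire when a highway classifier is already
# confident, measured by the entropy of its output distribution. The `entropy`
# helper used above is not shown in this snippet; a hypothetical minimal
# version (not the exact library code) could look like this:
import torch


def entropy_sketch(logits: torch.Tensor) -> torch.Tensor:
    # per-example entropy of softmax(logits); low entropy == confident prediction
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)


# An exit layer would then raise HighwayException once
# entropy_sketch(highway_logits) drops below a chosen threshold.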
| 432 | 1 |
def check_cycle(graph: dict) -> bool:
    """Return True if the given directed graph contains a cycle."""
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, visited, rec_stk, node)
        for node in graph
    )


def depth_first_search(graph: dict, visited: set[int], rec_stk: set[int], vertex: int) -> bool:
    """Return True if a back edge (and hence a cycle) is reachable from vertex."""
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, visited, rec_stk, node):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
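# Example usage (added) of the functions above on small adjacency-list graphs:
# check_cycle({0: [1], 1: [2], 2: [0], 3: []})  # True: 0 -> 1 -> 2 -> 0
# check_cycle({0: [1], 1: []})                  # False: no back edge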
| 59 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {
'''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : int = '''timesformer'''
def __init__( self , _A=224 , _A=16 , _A=3 , _A=8 , _A=768 , _A=12 , _A=12 , _A=3072 , _A="gelu" , _A=0.0 , _A=0.0 , _A=0.0_2 , _A=1e-6 , _A=True , _A="divided_space_time" , _A=0 , **_A , ):
super().__init__(**_A )
__A : Union[str, Any] = image_size
__A : Dict = patch_size
__A : Any = num_channels
__A : Optional[int] = num_frames
__A : List[Any] = hidden_size
__A : List[Any] = num_hidden_layers
__A : List[str] = num_attention_heads
__A : Optional[Any] = intermediate_size
__A : int = hidden_act
__A : int = hidden_dropout_prob
__A : Optional[int] = attention_probs_dropout_prob
__A : List[str] = initializer_range
__A : Optional[int] = layer_norm_eps
__A : int = qkv_bias
__A : List[str] = attention_type
__A : int = drop_path_rate
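# Added sanity math on the defaults above (224-px frames, 16-px patches,
# 8 frames); the "+ 1 for a [CLS] token" is an assumption about the
# tokenization, not something read from this config class:
image_size, patch_size, num_frames = 224, 16, 8
num_patches_per_frame = (image_size // patch_size) ** 2  # 196
num_tokens = num_patches_per_frame * num_frames + 1      # 1569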
| 239 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class lowerCAmelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
__snake_case = 42
class lowerCAmelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , _UpperCAmelCase = 6_55_36 , _UpperCAmelCase = None , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 0 , _UpperCAmelCase = "fourier" , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = 0.0 , _UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _UpperCAmelCase = "UNetMidBlock1D" , _UpperCAmelCase = None , _UpperCAmelCase = (32, 32, 64) , _UpperCAmelCase = None , _UpperCAmelCase = 8 , _UpperCAmelCase = 1 , _UpperCAmelCase = False , ):
super().__init__()
snake_case_ = sample_size
# time
if time_embedding_type == "fourier":
snake_case_ = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_UpperCAmelCase , log=_UpperCAmelCase , flip_sin_to_cos=_UpperCAmelCase )
snake_case_ = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
snake_case_ = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_UpperCAmelCase , downscale_freq_shift=_UpperCAmelCase )
snake_case_ = block_out_channels[0]
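# The "fourier" branch above embeds a scalar timestep t with fixed random
# Fourier features, roughly [sin(2*pi*w*t), cos(2*pi*w*t)] for frozen weights
# w ~ N(0, 1), which is why its embedding dim is 2 * block_out_channels[0].
# A hypothetical standalone sketch (not the diffusers implementation):
#   w = torch.randn(embedding_size)               # frozen at init
#   proj = 2 * torch.pi * t[:, None] * w[None, :]
#   emb = torch.cat([torch.sin(proj), torch.cos(proj)], dim=-1)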
if use_timestep_embedding:
snake_case_ = block_out_channels[0] * 4
snake_case_ = TimestepEmbedding(
in_channels=_UpperCAmelCase , time_embed_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase , out_dim=block_out_channels[0] , )
snake_case_ = nn.ModuleList([] )
snake_case_ = None
snake_case_ = nn.ModuleList([] )
snake_case_ = None
# down
snake_case_ = in_channels
for i, down_block_type in enumerate(_UpperCAmelCase ):
snake_case_ = output_channel
snake_case_ = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
snake_case_ = i == len(_UpperCAmelCase ) - 1
snake_case_ = get_down_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_UpperCAmelCase )
# mid
snake_case_ = get_mid_block(
_UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_UpperCAmelCase , add_downsample=_UpperCAmelCase , )
# up
snake_case_ = list(reversed(_UpperCAmelCase ) )
snake_case_ = reversed_block_out_channels[0]
if out_block_type is None:
snake_case_ = out_channels
else:
snake_case_ = block_out_channels[0]
for i, up_block_type in enumerate(_UpperCAmelCase ):
snake_case_ = output_channel
snake_case_ = (
reversed_block_out_channels[i + 1] if i < len(_UpperCAmelCase ) - 1 else final_upsample_channels
)
snake_case_ = i == len(_UpperCAmelCase ) - 1
snake_case_ = get_up_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_UpperCAmelCase )
snake_case_ = output_channel
# out
snake_case_ = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
snake_case_ = get_out_block(
out_block_type=_UpperCAmelCase , num_groups_out=_UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=_UpperCAmelCase , act_fn=_UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ):
snake_case_ = timestep
if not torch.is_tensor(_UpperCAmelCase ):
snake_case_ = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(_UpperCAmelCase ) and len(timesteps.shape ) == 0:
snake_case_ = timesteps[None].to(sample.device )
snake_case_ = self.time_proj(_UpperCAmelCase )
if self.config.use_timestep_embedding:
snake_case_ = self.time_mlp(_UpperCAmelCase )
else:
snake_case_ = timestep_embed[..., None]
snake_case_ = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
snake_case_ = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
snake_case_ = ()
for downsample_block in self.down_blocks:
snake_case_ , snake_case_ = downsample_block(hidden_states=_UpperCAmelCase , temb=_UpperCAmelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
snake_case_ = self.mid_block(_UpperCAmelCase , _UpperCAmelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
snake_case_ = down_block_res_samples[-1:]
snake_case_ = down_block_res_samples[:-1]
snake_case_ = upsample_block(_UpperCAmelCase , res_hidden_states_tuple=_UpperCAmelCase , temb=_UpperCAmelCase )
# 5. post-process
if self.out_block:
snake_case_ = self.out_block(_UpperCAmelCase , _UpperCAmelCase )
if not return_dict:
return (sample,)
        return UNet1DOutput(sample=sample)
| 531 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None )-> Any:
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
snake_case_ = nn.Parameter(SCREAMING_SNAKE_CASE )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
snake_case_ = nn.Parameter(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> int:
"""simple docstring"""
snake_case_ = np.asarray(weights[0] )
snake_case_ = np.asarray(weights[1] )
snake_case_ = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE ) , )
set_param(
torch_layer.output.dense , torch.tensor(SCREAMING_SNAKE_CASE ).view(-1 , SCREAMING_SNAKE_CASE ).contiguous().transpose(0 , 1 ) , )
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> Tuple:
"""simple docstring"""
snake_case_ = np.asarray(weights[0] )
snake_case_ = np.asarray(weights[1] )
snake_case_ = np.asarray(weights[2] )
snake_case_ = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE ) , )
set_param(
torch_layer.output.dense , torch.tensor(SCREAMING_SNAKE_CASE ).view(-1 , SCREAMING_SNAKE_CASE ).contiguous().transpose(0 , 1 ) , )
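# Added note: the transpose/view pattern above flattens trax's per-head weight
# layout into the single (hidden, hidden) matrix a torch Linear expects. A toy
# demonstration with assumed shapes (the real trax layout may differ):
#   w_trax = np.zeros((hidden, num_heads, head_dim))  # hidden == num_heads * head_dim
#   w_torch = torch.tensor(w_trax).transpose(1, 2).contiguous().view(-1, hidden)
#   # w_torch.shape == (hidden, hidden)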
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> int:
"""simple docstring"""
snake_case_ = weights[0][0][0]
snake_case_ = np.asarray(layer_norm_a[0] )
snake_case_ = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE ) , torch.tensor(SCREAMING_SNAKE_CASE ) , )
# lsh weights + output
snake_case_ = weights[0][1]
if len(SCREAMING_SNAKE_CASE ) < 4:
set_layer_weights_in_torch_lsh(SCREAMING_SNAKE_CASE , torch_block.attention , SCREAMING_SNAKE_CASE )
else:
set_layer_weights_in_torch_local(SCREAMING_SNAKE_CASE , torch_block.attention , SCREAMING_SNAKE_CASE )
# intermediate weighs
snake_case_ = weights[2][0][1][2]
# Chunked Feed Forward
if len(SCREAMING_SNAKE_CASE ) == 4:
snake_case_ = intermediate_weights[2]
# layernorm 2
snake_case_ = np.asarray(intermediate_weights[0][0] )
snake_case_ = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE ) , torch.tensor(SCREAMING_SNAKE_CASE ) , )
# intermediate dense
snake_case_ = np.asarray(intermediate_weights[1][0] )
snake_case_ = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE ) , )
# intermediate out
snake_case_ = np.asarray(intermediate_weights[4][0] )
snake_case_ = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE ) , )
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> str:
"""simple docstring"""
snake_case_ = torch_model.reformer
# word embeds
snake_case_ = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(SCREAMING_SNAKE_CASE ) , )
if isinstance(weights[3] , SCREAMING_SNAKE_CASE ):
snake_case_ = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
snake_case_ = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
snake_case_ = nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE ) )
snake_case_ = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
SCREAMING_SNAKE_CASE ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
snake_case_ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# output layer norm
snake_case_ = np.asarray(weights[7][0] )
snake_case_ = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE ) , torch.tensor(SCREAMING_SNAKE_CASE ) , )
# output embeddings
snake_case_ = np.asarray(weights[9][0] )
snake_case_ = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE ) , )
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> int:
"""simple docstring"""
snake_case_ = ReformerConfig.from_json_file(SCREAMING_SNAKE_CASE )
print(f'''Building PyTorch model from configuration: {config}''' )
snake_case_ = ReformerModelWithLMHead(SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE , '''rb''' ) as f:
snake_case_ = pickle.load(SCREAMING_SNAKE_CASE )['''weights''']
set_model_weights_in_torch(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 531 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    """Return all rotations of the string s."""
    if not isinstance(s, str):
        raise TypeError('''The parameter s type must be str.''')
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Return the Burrows-Wheeler transform of s and the index of s among its sorted rotations."""
    if not isinstance(s, str):
        raise TypeError('''The parameter s type must be str.''')
    if not s:
        raise ValueError('''The parameter s must not be empty.''')

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Reconstruct the original string from its Burrows-Wheeler transform."""
    if not isinstance(bwt_string, str):
        raise TypeError('''The parameter bwt_string type must be str.''')
    if not bwt_string:
        raise ValueError('''The parameter bwt_string must not be empty.''')
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            '''The parameter idx_original_string type must be int or'''
            ''' castable to int.''')
    if idx_original_string < 0:
        raise ValueError('''The parameter idx_original_string must not be lower than 0.''')
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            '''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''')

    ordered_rotations = [''''''] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = """Provide a string that I will generate its BWT transform: """
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
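# Added round-trip sanity check for the functions above (values worked out by hand):
# >>> res = bwt_transform("banana")
# >>> res["bwt_string"], res["idx_original_string"]
# ('nnbaaa', 3)
# >>> reverse_bwt(res["bwt_string"], res["idx_original_string"])
# 'banana'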
| 349 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A : Union[str, Any] = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 349 | 1 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__magic_name__ : List[Any] = TypeVar('KEY')
__magic_name__ : Optional[Any] = TypeVar('VAL')
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    """Sentinel marking a bucket whose item was removed."""

    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing and linear probing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to place the item in the given bucket; False if it is taken by another key."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY):
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self):
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ", ".join(f'{item.key}: {item.val}' for item in self._buckets if item)
        return f'HashMap({val_string})'
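# Added usage sketch for the map above (exercises insert, overwrite, delete,
# lookup and the automatic resize at 75% load):
if __name__ == "__main__":
    hm = HashMap()
    for i in range(20):        # enough inserts to trigger _size_up
        hm[f"key{i}"] = i
    hm["key0"] = 100           # overwrite an existing key
    del hm["key1"]
    assert len(hm) == 19 and hm["key0"] == 100
    print(hm)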
| 717 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase ( __snake_case ):
"""simple docstring"""
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , ):
super().__init__()
self.register_modules(transformer=__UpperCamelCase , vae=__UpperCamelCase , scheduler=__UpperCamelCase )
# create a imagenet -> id dictionary for easier use
A_ = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split("," ):
A_ = int(__UpperCamelCase )
A_ = dict(sorted(self.labels.items() ) )
def lowercase_ ( self , __UpperCamelCase ):
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
A_ = list(__UpperCamelCase )
for l in label:
if l not in self.labels:
raise ValueError(
f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , __UpperCamelCase , __UpperCamelCase = 4.0 , __UpperCamelCase = None , __UpperCamelCase = 50 , __UpperCamelCase = "pil" , __UpperCamelCase = True , ):
A_ = len(__UpperCamelCase )
A_ = self.transformer.config.sample_size
A_ = self.transformer.config.in_channels
A_ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=__UpperCamelCase , device=self.device , dtype=self.transformer.dtype , )
A_ = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
A_ = torch.tensor(__UpperCamelCase , device=self.device ).reshape(-1 )
A_ = torch.tensor([1000] * batch_size , device=self.device )
A_ = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(__UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
A_ = latent_model_input[: len(__UpperCamelCase ) // 2]
A_ = torch.cat([half, half] , dim=0 )
A_ = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
A_ = t
if not torch.is_tensor(__UpperCamelCase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
elif len(timesteps.shape ) == 0:
A_ = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
A_ = self.transformer(
__UpperCamelCase , timestep=__UpperCamelCase , class_labels=__UpperCamelCase ).sample
# perform guidance
if guidance_scale > 1:
A_ , A_ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
A_ , A_ = torch.split(__UpperCamelCase , len(__UpperCamelCase ) // 2 , dim=0 )
A_ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
A_ = torch.cat([half_eps, half_eps] , dim=0 )
A_ = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
A_ , A_ = torch.split(__UpperCamelCase , __UpperCamelCase , dim=1 )
else:
A_ = noise_pred
# compute previous image: x_t -> x_t-1
A_ = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
if guidance_scale > 1:
A_ , A_ = latent_model_input.chunk(2 , dim=0 )
else:
A_ = latent_model_input
A_ = 1 / self.vae.config.scaling_factor * latents
A_ = self.vae.decode(__UpperCamelCase ).sample
A_ = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A_ = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=__UpperCamelCase )
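# Classifier-free guidance as used in the loop above, in isolation: the batch
# holds the class-conditional half first and the null-class half second, and
# the guided noise is eps_uncond + scale * (eps_cond - eps_uncond). A minimal
# standalone sketch:
def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # noise_pred: (2 * batch, channels, h, w), conditional rows first
    cond_eps, uncond_eps = noise_pred.chunk(2, dim=0)
    return uncond_eps + guidance_scale * (cond_eps - uncond_eps)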
| 608 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class _lowercase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding='utf-8' , check=snake_case , )
assert hasattr(self , 'env' )
def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case : Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
UpperCamelCase_ : Tuple = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=snake_case , instance_count=snake_case , instance_type=self.instance_type , debugger_hook_config=snake_case , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=snake_case , py_version='py36' , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
TrainingJobAnalytics(snake_case ).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv" )
@parameterized.expand([(2,)] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = self.create_estimator(snake_case )
# run training
estimator.fit()
# result dataframe
UpperCamelCase_ : Optional[int] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCamelCase_ : Dict = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
UpperCamelCase_ : List[str] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCamelCase_ : Union[str, Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json" , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , snake_case )
| 417 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = '▁'
a_ = {'vocab_file': 'spiece.model'}
a_ = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
a_ = {
'google/reformer-crime-and-punishment': 524_288,
}
class _lowercase ( snake_case_ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
def __init__( self : Any , snake_case : List[Any] , snake_case : Any="</s>" , snake_case : Optional[Any]="<unk>" , snake_case : str=[] , snake_case : Optional[Dict[str, Any]] = None , **snake_case : str , ) -> None:
"""simple docstring"""
UpperCamelCase_ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case , unk_token=snake_case , additional_special_tokens=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , )
UpperCamelCase_ : Dict = vocab_file
UpperCamelCase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case )
@property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return self.sp_model.get_piece_size()
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict[str, int]:
"""simple docstring"""
UpperCamelCase_ : List[str] = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : Dict = self.__dict__.copy()
UpperCamelCase_ : Any = None
return state
def __setstate__( self : Optional[Any] , snake_case : Any ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Dict = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase_ : Optional[int] = {}
UpperCamelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(snake_case , out_type=snake_case )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : Optional[int] ) -> int:
"""simple docstring"""
return self.sp_model.piece_to_id(snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
if index < self.sp_model.get_piece_size():
UpperCamelCase_ : Tuple = self.sp_model.IdToPiece(snake_case )
return token
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : Any = []
UpperCamelCase_ : Tuple = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case ) + token
UpperCamelCase_ : int = []
else:
current_sub_tokens.append(snake_case )
out_string += self.sp_model.decode(snake_case )
return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : str , snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase_ : Union[str, Any] = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , 'wb' ) as fi:
UpperCamelCase_ : str = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
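# Added usage sketch, assuming this class is the ReformerTokenizer shipped with
# transformers (the class name is mangled above):
# from transformers import ReformerTokenizer
# tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
# ids = tok("Crime and Punishment", return_tensors="pt").input_ids
# tok.decode(ids[0])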
| 417 | 1 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def __UpperCAmelCase ( a_ , a_):
for i in range(a_):
snake_case_ = dataset[i]
@get_duration
def __UpperCAmelCase ( a_ , a_ , a_):
for i in range(0 , len(a_) , a_):
snake_case_ = dataset[i : i + batch_size]
@get_duration
def __UpperCAmelCase ( a_ , a_ , a_):
with dataset.formatted_as(type=a_):
for i in range(a_):
snake_case_ = dataset[i]
@get_duration
def __UpperCAmelCase ( a_ , a_ , a_ , a_):
with dataset.formatted_as(type=a_):
for i in range(0 , a_ , a_):
snake_case_ = dataset[i : i + batch_size]
def __UpperCAmelCase ( ):
snake_case_ = {'num examples': SPEED_TEST_N_EXAMPLES}
snake_case_ = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_00}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10_00}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
(read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
(read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10_00}),
]
snake_case_ = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_00}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10_00}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10_00}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('generating dataset')
snake_case_ = datasets.Features(
{'list': datasets.Sequence(datasets.Value('float32')), 'numbers': datasets.Value('float32')})
snake_case_ = generate_example_dataset(
os.path.join(a_ , 'dataset.arrow') , a_ , num_examples=a_ , seq_shapes={'list': (1_00,)} , )
print('first set of iterations')
for func, kwargs in functions:
print(func.__name__ , str(a_))
snake_case_ = func(a_ , **a_)
print('shuffling dataset')
snake_case_ = dataset.shuffle()
print('Second set of iterations (after shuffling')
for func, kwargs in functions_shuffled:
print('shuffled ' , func.__name__ , str(a_))
snake_case_ = func(
a_ , **a_)
with open(a_ , 'wb') as f:
f.write(json.dumps(a_).encode('utf-8'))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
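# `get_duration` comes from a local utils module not shown here; a plausible
# minimal implementation (hypothetical, the real helper may also record the
# function name or aggregate runs):
import functools
import timeit


def get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # return wall-clock seconds spent inside the wrapped call
        start = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - start
    return wrapper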
| 607 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 607 | 1 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A = logging.get_logger(__name__)
class a__ ( __magic_name__ ):
lowercase_ = ["input_features", "is_longer"]
def __init__( self : List[str] , UpperCamelCase_ : Dict=64 , UpperCamelCase_ : Tuple=48000 , UpperCamelCase_ : List[Any]=480 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=1024 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 14000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(
feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
__UpperCAmelCase : Union[str, Any] = top_db
__UpperCAmelCase : Optional[Any] = truncation
__UpperCAmelCase : str = padding
__UpperCAmelCase : int = fft_window_size
__UpperCAmelCase : str = (fft_window_size >> 1) + 1
__UpperCAmelCase : List[Any] = hop_length
__UpperCAmelCase : Optional[Any] = max_length_s
__UpperCAmelCase : Tuple = max_length_s * sampling_rate
__UpperCAmelCase : str = sampling_rate
__UpperCAmelCase : int = frequency_min
__UpperCAmelCase : Optional[Any] = frequency_max
__UpperCAmelCase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale="htk" , )
__UpperCAmelCase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm="slaney" , mel_scale="slaney" , )
def a_ ( self : Dict):
"""simple docstring"""
__UpperCAmelCase : Dict = copy.deepcopy(self.__dict__)
__UpperCAmelCase : str = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def a_ ( self : int , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None):
"""simple docstring"""
__UpperCAmelCase : List[Any] = spectrogram(
UpperCamelCase_ , window_function(self.fft_window_size , "hann") , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel="dB" , )
return log_mel_spectrogram.T
def a_ ( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3)
if len(ranges[1]) == 0:
# if the audio is too short, we just use the first chunk
__UpperCAmelCase : str = [0]
if len(ranges[2]) == 0:
# if the audio is too short, we just use the first chunk
__UpperCAmelCase : Dict = [0]
# randomly choose index for each part
__UpperCAmelCase : Dict = np.random.choice(ranges[0])
__UpperCAmelCase : List[str] = np.random.choice(ranges[1])
__UpperCAmelCase : List[Any] = np.random.choice(ranges[2])
__UpperCAmelCase : List[Any] = mel[idx_front : idx_front + chunk_frames, :]
__UpperCAmelCase : List[str] = mel[idx_middle : idx_middle + chunk_frames, :]
__UpperCAmelCase : List[str] = mel[idx_back : idx_back + chunk_frames, :]
__UpperCAmelCase : Tuple = torch.tensor(mel[None, None, :])
__UpperCAmelCase : Union[str, Any] = torch.nn.functional.interpolate(
UpperCamelCase_ , size=[chunk_frames, 64] , mode="bilinear" , align_corners=UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = mel_shrink[0][0].numpy()
__UpperCAmelCase : Optional[int] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0)
return mel_fusion
def a_ ( self : Optional[Any] , UpperCamelCase_ : np.array , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]):
"""simple docstring"""
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__UpperCAmelCase : List[str] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__UpperCAmelCase : List[Any] = len(UpperCamelCase_) - max_length
__UpperCAmelCase : int = np.random.randint(0 , overflow + 1)
__UpperCAmelCase : Union[str, Any] = waveform[idx : idx + max_length]
__UpperCAmelCase : Union[str, Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :]
elif truncation == "fusion":
__UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters)
__UpperCAmelCase : Dict = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
__UpperCAmelCase : Tuple = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__UpperCAmelCase : List[str] = np.stack([mel, mel, mel, mel] , axis=0)
__UpperCAmelCase : Any = False
else:
__UpperCAmelCase : List[str] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = True
else:
raise NotImplementedError(F"data_truncating {truncation} not implemented")
else:
__UpperCAmelCase : Optional[Any] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
__UpperCAmelCase : Tuple = int(max_length / len(UpperCamelCase_))
__UpperCAmelCase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1))[:max_length]
if padding == "repeatpad":
__UpperCAmelCase : Union[str, Any] = int(max_length / len(UpperCamelCase_))
__UpperCAmelCase : Optional[Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_))
__UpperCAmelCase : int = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0)
if truncation == "fusion":
__UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters)
__UpperCAmelCase : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0)
else:
__UpperCAmelCase : Optional[int] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :]
return input_mel, longer
def __call__( self : Dict , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Any , ):
"""simple docstring"""
__UpperCAmelCase : int = truncation if truncation is not None else self.truncation
__UpperCAmelCase : Optional[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
F" was sampled with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug.")
__UpperCAmelCase : List[str] = isinstance(UpperCamelCase_ , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}")
__UpperCAmelCase : str = is_batched_numpy or (
isinstance(UpperCamelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
__UpperCAmelCase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray):
__UpperCAmelCase : Tuple = np.asarray(UpperCamelCase_ , dtype=np.floataa)
elif isinstance(UpperCamelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
__UpperCAmelCase : Optional[int] = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
__UpperCAmelCase : int = [np.asarray(UpperCamelCase_)]
# convert to mel spectrogram, truncate and pad if needed.
__UpperCAmelCase : Optional[int] = [
self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_)
for waveform in raw_speech
]
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : List[Any] = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase_)
is_longer.append(UpperCamelCase_)
if truncation == "fusion" and sum(UpperCamelCase_) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
__UpperCAmelCase : Any = np.random.randint(0 , len(UpperCamelCase_))
__UpperCAmelCase : Optional[int] = True
if isinstance(input_mel[0] , UpperCamelCase_):
__UpperCAmelCase : Tuple = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for feature in input_mel]
# is_longer is a list of bool
__UpperCAmelCase : List[str] = [[longer] for longer in is_longer]
__UpperCAmelCase : Optional[int] = {"input_features": input_mel, "is_longer": is_longer}
__UpperCAmelCase : Optional[int] = BatchFeature(UpperCamelCase_)
if return_tensors is not None:
__UpperCAmelCase : Any = input_features.convert_to_tensors(UpperCamelCase_)
return input_features
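# Added usage sketch, assuming this is the ClapFeatureExtractor from
# transformers (the class name is mangled above):
# import numpy as np
# fe = ClapFeatureExtractor()
# audio = np.random.randn(48_000 * 3).astype(np.float32)  # 3 s of fake audio
# feats = fe(audio, sampling_rate=48_000, return_tensors="pt")
# feats["input_features"].shape, feats["is_longer"]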
| 77 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=False ) -> Tuple:
'''simple docstring'''
snake_case__ : int = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
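A minimal standalone sketch of the split above, for illustration (the hidden size of 768 is an assumption; timm stores query, key and value stacked row-wise in one fused matrix):

```python
import torch

hidden = 768
qkv_weight = torch.randn(3 * hidden, hidden)  # q, k, v stacked along dim 0
qkv_bias = torch.randn(3 * hidden)
q_w, k_w, v_w = qkv_weight[:hidden], qkv_weight[hidden : 2 * hidden], qkv_weight[-hidden:]
q_b, k_b, v_b = qkv_bias[:hidden], qkv_bias[hidden : 2 * hidden], qkv_bias[-hidden:]
assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)
```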
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original checkpoint's weights to our ViT hybrid structure.
    """
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
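The acceptance test the script relies on, shown in isolation with synthetic tensors: a conversion is only accepted when the ported model reproduces the reference outputs element-wise within a tolerance.

```python
import torch

reference = torch.randn(1, 1000)                      # stand-in for the timm logits
ported = reference + 1e-5 * torch.randn_like(reference)  # stand-in for the HF logits
assert torch.allclose(reference, ported, atol=1e-3)
```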
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 38 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
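A hedged sketch of the encode -> forward -> decode path the tool wraps, using the checkpoint named above (the one-second silent 16 kHz waveform is a stand-in input, not from the original):

```python
import torch
from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")

audio = torch.zeros(16000).numpy()  # 1 second of silence as a placeholder waveform
input_features = processor(audio, sampling_rate=16000, return_tensors="pt").input_features
with torch.no_grad():
    predicted_ids = model.generate(inputs=input_features)
text = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
```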
| 644 |
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2,
        num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
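For illustration, a vectorized equivalent of the coordinate-swapping loop above, offered as a sketch (not part of the original tester): sorting each coordinate pair enforces x0 <= x1 and y0 <= y1 in one shot.

```python
import torch

bbox = torch.randint(0, 1000, (2, 7, 4))
x = torch.sort(bbox[..., [0, 2]], dim=-1).values  # (x0, x1) with x0 <= x1
y = torch.sort(bbox[..., [1, 3]], dim=-1).values  # (y0, y1) with y0 <= y1
bbox = torch.stack([x[..., 0], y[..., 0], x[..., 1], y[..., 1]], dim=-1)
assert (bbox[..., 2] >= bbox[..., 0]).all() and (bbox[..., 3] >= bbox[..., 1]).all()
```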
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 644 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
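A worked example of the resize rule above (the image size is invented, and the checkpoint filename is only inspected for the substring "detection" to pick the target size): a 1200x900 image with target 800 scales by 2/3 to 800x600.

```python
from PIL import Image

image = Image.new("RGB", (1200, 900))
resized = resize(image, "pubtables1m_detection_detr_r18.pth")
assert resized.size == (800, 600)  # longer side scaled to 800, aspect ratio kept
```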
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 302 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable unCLIP, and scales / unscales image
    embeddings accordingly.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
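A quick round-trip check of the normalizer above (embedding_dim=4 and the random input are arbitrary choices): unscaling a scaled embedding recovers the input exactly, since the two affine maps are inverses.

```python
import torch

norm = StableUnCLIPImageNormalizer(embedding_dim=4)
embeds = torch.randn(3, 4)
torch.testing.assert_close(norm.unscale(norm.scale(embeds)), embeds)
```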
| 440 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class UpperCamelCase_ :
'''simple docstring'''
    languages: List[str]
    id: Optional[str] = None
# Automatically constructed
UpperCAmelCase__ = "dict"
UpperCAmelCase__ = None
UpperCAmelCase__ = field(default='''Translation''' , init=UpperCAmelCase__ , repr=UpperCAmelCase__ )
def __call__( self : Optional[int]) ->str:
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
return {k: Value('''string''') for k in sorted(self.languages)}
@dataclass
class UpperCamelCase_ :
'''simple docstring'''
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
# Automatically constructed
UpperCAmelCase__ = "dict"
UpperCAmelCase__ = None
UpperCAmelCase__ = field(default='''TranslationVariableLanguages''' , init=UpperCAmelCase__ , repr=UpperCAmelCase__ )
    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None
def __call__( self : int) ->List[str]:
'''simple docstring'''
return pa.struct({'''language''': pa.list_(pa.string()), '''translation''': pa.list_(pa.string())})
    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
return {"language": languages, "translation": translations}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''')),
"translation": Sequence(Value('''string''')),
}
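A worked example of `encode_example` above (the sentence values are invented): multiple translations for one language are flattened into parallel, language-sorted tuples.

```python
feat = TranslationVariableLanguages(languages=["de", "en"])
out = feat.encode_example({"en": "the cat", "de": ["die katze", "eine katze"]})
# tuples are sorted by (language, text) before being unzipped
assert out == {"language": ("de", "de", "en"), "translation": ("die katze", "eine katze", "the cat")}
```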
| 177 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
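The score above turns the mean per-token cross-entropy returned by the model into a total sequence log-likelihood; a small sketch of that identity with random tensors (shapes are illustrative):

```python
import torch
import torch.nn.functional as F

logits = torch.randn(1, 4, 10)              # (batch, seq_len, vocab)
labels = torch.randint(0, 10, (1, 4))
mean_loss = F.cross_entropy(logits.view(-1, 10), labels.view(-1))  # mean over tokens
total_nll = mean_loss * labels.shape[-1]    # sum of per-token negative log-likelihoods
log_likelihood = -total_nll                 # matches -(labels.shape[-1] * loss.item())
```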
| 177 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self, images, text=None, text_pair=None, boxes=None, word_labels=None,
        add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0,
        pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None,
        return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False,
        return_length=False, verbose=True, return_tensors=None, **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding,
            truncation=truncation, max_length=max_length, stride=stride,
            pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
            return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
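A hypothetical end-to-end call of the processor (the checkpoint id is the standard base model, but the blank image is a placeholder, and OCR via apply_ocr=True additionally requires pytesseract to be installed):

```python
from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
document = Image.new("RGB", (224, 224), "white")  # stand-in for a scanned page
encoding = processor(document, return_tensors="pt")
print(sorted(encoding.keys()))  # expect input_ids, attention_mask, bbox, pixel_values
```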
| 154 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given date, using Conway's doomsday rule."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
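A worked check of the implementation above: 24 October 2020 was a Saturday. Century 20 gives anchor (5 * (20 % 4) + 2) % 7 = 2; centurian 20 gives doomsday (1 + 8 + 2 + 2) % 7 = 6; 2020 is a leap year, so October's anchor is DOOMSDAY_LEAP[9] = 3, and (6 + 24 - 3) % 7 = 6 maps to "Saturday".

```python
assert get_week_day(2020, 10, 24) == "Saturday"
```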
| 154 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True,
        keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]",
        pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
            remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token,
            eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
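A small check of the special-token layout built above (the token ids are illustrative stand-ins for real vocabulary ids): a pair of sequences becomes `[CLS] A [SEP] B [SEP]`, with segment ids 0 over the first part and 1 over the second.

```python
from transformers import AlbertTokenizerFast

tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
ids = tok.build_inputs_with_special_tokens([10, 11], [20])           # [CLS] A A [SEP] B [SEP]
type_ids = tok.create_token_type_ids_from_sequences([10, 11], [20])
assert len(ids) == 6 and type_ids == [0, 0, 0, 0, 1, 1]
```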
| 721 |

from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr  # first half of the array is internal nodes
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Update an element in O(log N) time."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Get a range query value in O(log N) time."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
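A minimal usage sketch of the class above (the array values are arbitrary): queries are inclusive on both ends, and an update touches only the O(log n) ancestors of the changed leaf.

```python
st = SegmentTree([1, 10, -2, 9], min)
assert st.query(0, 2) == -2   # min over indices 0..2 inclusive
st.update(2, 5)               # replace -2 with 5
assert st.query(0, 2) == 1
```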
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Test all possible segments."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 387 | 0 |
'''simple docstring'''
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """
    Count the hollow square laminae that can be formed using up to `limit`
    one-square tiles (Project Euler 173).
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 173 |

from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """
    Squeeze-and-Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507).
    """

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
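A shape walkthrough of the squeeze-and-excitation block above with illustrative sizes: global-pool to 1x1, bottleneck 1x1 conv, ReLU, expanding 1x1 conv, sigmoid, then channel-wise rescaling of the input.

```python
import torch
from torch import nn

x = torch.randn(2, 64, 7, 7)                      # (batch, channels, height, width)
pooled = nn.AdaptiveAvgPool2d((1, 1))(x)          # -> (2, 64, 1, 1)
attn = nn.Sequential(
    nn.Conv2d(64, 16, kernel_size=1), nn.ReLU(),  # squeeze to 16 channels
    nn.Conv2d(16, 64, kernel_size=1), nn.Sigmoid(),  # expand back, gate in (0, 1)
)(pooled)                                         # -> (2, 64, 1, 1)
out = x * attn                                    # broadcast over H and W
assert out.shape == x.shape
```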
class RegNetXLayer(nn.Module):
    """
    RegNet's layer composed of three convolutions, the same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """
    RegNet's Y layer: an X layer with Squeeze and Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """
    A RegNet stage composed of stacked layers.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
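# Hedged usage sketch of the classes above via the public `transformers` RegNet
# API (assumes `transformers` and `torch` are installed; the random input and
# label below are illustrative, not part of the original file):
if __name__ == "__main__":
    from transformers import RegNetConfig

    _config = RegNetConfig(num_labels=10)
    _model = RegNetForImageClassification(_config)
    _pixel_values = torch.randn(1, 3, 224, 224)
    _labels = torch.tensor([3])
    _outputs = _model(pixel_values=_pixel_values, labels=_labels)
    print(_outputs.loss, _outputs.logits.shape)  # scalar loss, torch.Size([1, 10])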
| 192 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
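# Hedged usage sketch of the config above (the override values are illustrative):
if __name__ == "__main__":
    _cfg = VanConfig(hidden_sizes=[32, 64, 160, 256], depths=[2, 2, 4, 2])
    print(_cfg.model_type, _cfg.hidden_sizes, _cfg.mlp_ratios)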
| 548 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
        temp = []
        (pro, x) = heapq.heappop(self.elements)
        while x != item:
            temp.append((pro, x))
            (pro, x) = heapq.heappop(self.elements)
        for prito, yyy in temp:
            heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
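# Hedged sanity check for the queue above (illustrative values): `put` on an
# item already present re-prioritises it instead of inserting a duplicate.
if __name__ == "__main__":
    _pq = PriorityQueue()
    _pq.put((0, 0), 5)
    _pq.put((1, 1), 2)
    assert _pq.minkey() == 2 and _pq.top_show() == (1, 1)
    _pq.put((0, 0), 1)  # update: (0, 0) now outranks (1, 1)
    assert _pq.top_show() == (0, 0)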
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)

    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )


def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
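# Illustrative check of `key` with the values above: g(start) = 0 and the
# consistent (euclidean) heuristic from (0, 0) to (19, 19) is 19 * sqrt(2),
# so key(start, 0, goal, g_function) = 0 + W1 * 26.87... ≈ 26.87.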
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 548 | 1 |
def exchange_sort(numbers: list[int]) -> list[int]:
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
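
# Illustrative behaviour (exchange sort is an O(n^2) comparison sort):
# exchange_sort([3, 1, 2]) -> [1, 2, 3]; the input list is sorted in place.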
if __name__ == "__main__":
UpperCAmelCase__ : Any = input('''Enter numbers separated by a comma:\n''').strip()
UpperCAmelCase__ : List[Any] = [int(item) for item in user_input.split(''',''')]
print(exchange_sort(unsorted))
| 410 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
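# These checks are written for pytest discovery; a hedged way to run them
# (assumes the digital_image_processing package and test images are present;
# the file name is illustrative):
#     python -m pytest test_digital_image_processing.py -q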
| 410 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
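# Hedged illustration of the optional-dependency pattern above: each backend is
# probed once, and missing backends simply leave their symbols out of
# `_import_structure` before `_LazyModule` takes over. A reduced sketch
# (module and symbol names are illustrative):
#     import importlib.util
#     structure = {"configuration": ["Config"]}
#     if importlib.util.find_spec("torch") is not None:
#         structure["modeling"] = ["Model"]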
| 636 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
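# Hedged usage sketch of the extractor under test (assumes `transformers` with
# SpeechT5 support; the waveform is illustrative random data, not LibriSpeech):
#     import numpy as np
#     from transformers import SpeechT5FeatureExtractor
#     fe = SpeechT5FeatureExtractor()
#     wav = np.random.randn(16000).astype(np.float32)  # one second at 16 kHz
#     feats = fe(wav, sampling_rate=16000, return_tensors="pt")
#     print(feats.input_values.shape)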
| 636 | 1 |
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy  # second env entry reconstructed; exact key name is an assumption
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))

    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))

    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)

    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)

    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)

    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])

    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
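# Hedged example of one equivalent manual launch for the configurations tested
# above (flag names mirror those assembled into `cmd_config`; the script path
# and bounds are illustrative):
#     accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 \
#         --use_fsdp --mixed_precision=fp16 --fsdp_sharding_strategy=1 \
#         --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#         --fsdp_transformer_layer_cls_to_wrap=BertLayer \
#         test_performance.py --output_dir=/tmp/fsdp_out --performance_lower_bound=0.82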
| 614 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/m2m100_418M": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
@property
def __magic_name__ ( self ) -> int:
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __magic_name__ ( self ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __magic_name__ ( self , __snake_case ) -> None:
'''simple docstring'''
__a =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __magic_name__ ( self , __snake_case ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__snake_case , out_type=__snake_case )
def __magic_name__ ( self , __snake_case ) -> str:
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__snake_case , self.encoder[self.unk_token] )
def __magic_name__ ( self , __snake_case ) -> str:
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__snake_case , self.unk_token )
def __magic_name__ ( self , __snake_case ) -> str:
'''simple docstring'''
__a =[]
__a =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__snake_case ) + token
__a =[]
else:
current_sub_tokens.append(__snake_case )
out_string += self.sp_model.decode(__snake_case )
return out_string.strip()
def __magic_name__ ( self , __snake_case , __snake_case = None , __snake_case = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
__a =[1] * len(self.prefix_tokens )
__a =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__snake_case )) + suffix_ones
return prefix_ones + ([0] * len(__snake_case )) + ([0] * len(__snake_case )) + suffix_ones
def __magic_name__ ( self , __snake_case , __snake_case = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a ={self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def __getstate__( self ) -> Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        # the SentencePiece processor is not picklable; it is reloaded in __setstate__
        state['sp_model'] = None
        return state
    def __setstate__( self , d ) -> None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        save_dir = Path(save_directory )
        if not save_dir.is_dir():
            raise OSError(f'{save_directory} should be a directory' )
        vocab_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
    def prepare_seq2seq_batch( self , src_texts , src_lang = "en" , tgt_texts = None , tgt_lang = "ro" , **kwargs , ) -> BatchEncoding:
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _build_translation_inputs( self , raw_inputs , src_lang , tgt_lang , **extra_kwargs ) -> Optional[Any]:
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , **extra_kwargs )
        tgt_lang_id = self.get_lang_id(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def _switch_to_input_mode( self ) -> Any:
        '''simple docstring'''
        self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ) -> Tuple:
        '''simple docstring'''
        self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        '''simple docstring'''
        lang_token = self.get_lang_token(src_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , tgt_lang ) -> None:
        '''simple docstring'''
        lang_token = self.get_lang_token(tgt_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token( self , lang ) -> str:
        '''simple docstring'''
        return self.lang_code_to_token[lang]
    def get_lang_id( self , lang ) -> int:
        '''simple docstring'''
        lang_token = self.get_lang_token(lang )
        return self.lang_token_to_id[lang_token]
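# Note on the class above: language codes get dedicated token ids appended after
# the SentencePiece vocabulary, which is why vocab_size is
# len(encoder) + len(lang_token_to_id).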
def load_spm( path : str , sp_model_kwargs : Dict[str, Any] ):
    '''Load a SentencePiece processor from ``path``.'''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path : str ):
    '''Read and parse a JSON file.'''
    with open(path , 'r' ) as f:
        return json.load(f )
def save_json( data , path : str ):
    '''Write ``data`` to ``path`` as indented JSON.'''
    with open(path , 'w' ) as f:
        json.dump(data , f , indent=2 )
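# Minimal round-trip sketch for the JSON helpers above (the file name is
# illustrative only and assumes write access to the working directory):
#
#   save_json({"hello": 1}, "vocab_demo.json")
#   assert load_json("vocab_demo.json") == {"hello": 1}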
| 242 | 0 |
'''simple docstring'''
from __future__ import annotations
def all_construct( target: str , word_bank: list[str] | None = None ) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target ) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size ):
        table.append([] )
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word )] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word )] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
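# The tabulation above runs in roughly O(n * m * k) time for n = len(target),
# m words in the bank and k partial combinations stored per index; e.g.
# all_construct("aa", ["a"]) returns [["a", "a"]].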
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
| 712 | '''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=30 , snake_case_=400 , snake_case_=True , snake_case_=None , snake_case_=0.9 , snake_case_=None , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
__UpperCAmelCase: List[Any] = size if size is not None else {"""shortest_edge""": 30}
__UpperCAmelCase: str = crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
__UpperCAmelCase: Dict = parent
__UpperCAmelCase: Union[str, Any] = batch_size
__UpperCAmelCase: List[Any] = num_channels
__UpperCAmelCase: List[str] = min_resolution
__UpperCAmelCase: List[str] = max_resolution
__UpperCAmelCase: Tuple = do_resize_and_center_crop
__UpperCAmelCase: Union[str, Any] = size
__UpperCAmelCase: str = crop_pct
__UpperCAmelCase: List[Any] = crop_size
__UpperCAmelCase: str = do_normalize
__UpperCAmelCase: Union[str, Any] = image_mean
__UpperCAmelCase: List[str] = image_std
def lowercase_ ( self ):
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class a ( __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Any = PoolFormerImageProcessingTester(self )
@property
def lowercase_ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(snake_case_ , """size""" ) )
self.assertTrue(hasattr(snake_case_ , """crop_pct""" ) )
self.assertTrue(hasattr(snake_case_ , """do_normalize""" ) )
self.assertTrue(hasattr(snake_case_ , """image_mean""" ) )
self.assertTrue(hasattr(snake_case_ , """image_std""" ) )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
__UpperCAmelCase: Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowercase_ ( self ):
'''simple docstring'''
pass
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCAmelCase: Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
__UpperCAmelCase: Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__UpperCAmelCase: Any = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCAmelCase: Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
__UpperCAmelCase: Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__UpperCAmelCase: str = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCAmelCase: Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
__UpperCAmelCase: Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__UpperCAmelCase: Optional[int] = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , ) | 466 | 0 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def lowerCamelCase_ ( A_ , A_ ):
__lowerCamelCase = tmp_path_factory.mktemp('''dset_infos_dir''' )
if "full:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
if "empty:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''''' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f:
f.write('''{"default": {"dataset_size": 42}}''' )
__lowerCamelCase = DatasetInfosDict.from_directory(A_ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
            description='''foo''' ,
            features=Features({'''a''': Value('''int32''' )} ) ,
            builder_name='''builder''' ,
            config_name='''config''' ,
            version='''1.0.0''' ,
            splits=[{'''name''': '''train'''}] ,
            download_size=42 , ),
] , )
def lowerCamelCase_ ( A_ , A_ ):
__lowerCamelCase = str(A_ )
dataset_info.write_to_directory(A_ )
__lowerCamelCase = DatasetInfo.from_directory(A_ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(A_ , '''dataset_info.json''' ) )
def lowerCamelCase_ ( ):
__lowerCamelCase = DatasetInfo(
description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=13_37 , post_processing_size=4_42 , dataset_size=12_34 , size_in_bytes=13_37 + 4_42 + 12_34 , )
__lowerCamelCase = dataset_info._to_yaml_dict()
assert sorted(A_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
__lowerCamelCase = yaml.safe_dump(A_ )
__lowerCamelCase = yaml.safe_load(A_ )
assert dataset_info_yaml_dict == reloaded
def lowerCamelCase_ ( ):
__lowerCamelCase = DatasetInfo()
__lowerCamelCase = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
                description='''foo''' ,
                features=Features({'''a''': Value('''int32''' )} ) ,
                builder_name='''builder''' ,
                config_name='''config''' ,
                version='''1.0.0''' ,
                splits=[{'''name''': '''train'''}] ,
                download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def lowerCamelCase_ ( A_ , A_ ):
__lowerCamelCase = str(A_ )
dataset_infos_dict.write_to_directory(A_ )
__lowerCamelCase = DatasetInfosDict.from_directory(A_ )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
__lowerCamelCase = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
__lowerCamelCase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(A_ , '''README.md''' ) )
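# The round-trip tests above can be run in isolation with pytest; the file path
# below is only an assumption about the local checkout layout:
#
#   pytest tests/test_info.py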
| 316 |
'''simple docstring'''
import os
from pathlib import Path
def lowerCamelCase_ ( ):
from torch.utils.cpp_extension import load
__lowerCamelCase = Path(A_ ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
__lowerCamelCase = [
root / filename
for filename in [
'''vision.cpp''',
os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
]
]
load(
'''MultiScaleDeformableAttention''' , A_ , with_cuda=A_ , extra_include_paths=[str(A_ )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
'''-DCUDA_HAS_FP16=1''',
'''-D__CUDA_NO_HALF_OPERATORS__''',
'''-D__CUDA_NO_HALF_CONVERSIONS__''',
'''-D__CUDA_NO_HALF2_OPERATORS__''',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
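# Usage sketch for the kernel loader above: the first call JIT-compiles the
# extension, so a CUDA toolchain (nvcc) must be on PATH; later calls reuse the
# cached build. The loader's obfuscated name is kept as defined above:
#
#   MSDA = lowerCamelCase_()  # compiled MultiScaleDeformableAttention ops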
| 316 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase__ ( UpperCAmelCase ):
lowerCamelCase_ : Optional[Any] = ['image_processor', 'tokenizer']
lowerCamelCase_ : Optional[int] = 'LayoutLMv3ImageProcessor'
lowerCamelCase_ : str = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__(self : Any , _snake_case : int=None , _snake_case : Dict=None , **_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _snake_case , )
lowerCamelCase_ : List[str] = kwargs.pop('feature_extractor' )
lowerCamelCase_ : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_snake_case , _snake_case )
def __call__(self : List[Any] , _snake_case : str , _snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _snake_case : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _snake_case : Union[List[List[int]], List[List[List[int]]]] = None , _snake_case : Optional[Union[List[int], List[List[int]]]] = None , _snake_case : bool = True , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Union[bool, str, TruncationStrategy] = None , _snake_case : Optional[int] = None , _snake_case : int = 0 , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[bool] = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = True , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Tuple , ) -> BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
# first, apply the image processor
lowerCamelCase_ : Any = self.image_processor(images=_snake_case , return_tensors=_snake_case )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_snake_case , _snake_case ):
lowerCamelCase_ : Dict = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCamelCase_ : List[str] = features['words']
        lowerCamelCase_ : Any = self.tokenizer(
            text=text if text is not None else features['words'] ,
            text_pair=text_pair if text_pair is not None else None ,
            boxes=boxes if boxes is not None else features['boxes'] ,
            word_labels=_snake_case ,
            add_special_tokens=_snake_case ,
            padding=_snake_case ,
            truncation=_snake_case ,
            max_length=_snake_case ,
            stride=_snake_case ,
            pad_to_multiple_of=_snake_case ,
            return_token_type_ids=_snake_case ,
            return_attention_mask=_snake_case ,
            return_overflowing_tokens=_snake_case ,
            return_special_tokens_mask=_snake_case ,
            return_offsets_mapping=_snake_case ,
            return_length=_snake_case ,
            verbose=_snake_case ,
            return_tensors=_snake_case ,
            **_snake_case , )
# add pixel values
lowerCamelCase_ : Tuple = features.pop('pixel_values' )
if return_overflowing_tokens is True:
lowerCamelCase_ : List[Any] = self.get_overflowing_images(_snake_case , encoded_inputs['overflow_to_sample_mapping'] )
lowerCamelCase_ : Tuple = images
return encoded_inputs
def UpperCAmelCase_ (self : List[Any] , _snake_case : Any , _snake_case : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_snake_case ) != len(_snake_case ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f' {len(_snake_case )} and {len(_snake_case )}' )
return images_with_overflow
def UpperCAmelCase_ (self : Any , *_snake_case : Union[str, Any] , **_snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def UpperCAmelCase_ (self : str , *_snake_case : Tuple , **_snake_case : Any ) -> Any:
"""simple docstring"""
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def UpperCAmelCase_ (self : List[Any] ) -> Any:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def UpperCAmelCase_ (self : Optional[Any] ) -> Dict:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _snake_case , )
return self.image_processor_class
@property
def UpperCAmelCase_ (self : Any ) -> str:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _snake_case , )
return self.image_processor
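# Minimal usage sketch for the processor above (assumes hub access to the public
# "microsoft/layoutlmv3-base" checkpoint; `image` is a PIL.Image):
#
#   from transformers import LayoutLMv3Processor
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor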
| 144 |
from __future__ import annotations
def two_pointer( nums: list[int] , target: int ) -> list[int]:
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
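# Note: the two-pointer scan above assumes ``nums`` is sorted in ascending
# order; for the sorted demo input below, two_pointer([2, 7, 11, 15], 9)
# returns [0, 1] because nums[0] + nums[1] == 9.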
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 1_1, 1_5], 9) = }''')
| 144 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu( vector ):
    """Apply the rectified linear unit element-wise: max(0, x)."""
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
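    # relu is applied element-wise, so it also works on higher-rank inputs, e.g.
    # np.array(relu([[-2, 3], [4, -5]])) --> [[0, 3], [4, 0]]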
| 331 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase_ : str = {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class lowerCamelCase__ ( __lowerCamelCase ):
"""simple docstring"""
UpperCamelCase__ = '''gpt_neox_japanese'''
def __init__( self : Tuple ,a__ : str=3_20_00 ,a__ : Union[str, Any]=25_60 ,a__ : Optional[Any]=32 ,a__ : Tuple=32 ,a__ : str=4 ,a__ : int="gelu" ,a__ : Tuple=1.00 ,a__ : Optional[int]=1_00_00 ,a__ : Union[str, Any]=20_48 ,a__ : List[str]=0.02 ,a__ : Optional[int]=1e-5 ,a__ : Union[str, Any]=True ,a__ : int=3_19_96 ,a__ : List[Any]=3_19_99 ,a__ : str=0.1 ,a__ : Dict=0.0 ,**a__ : List[Any] ,):
super().__init__(bos_token_id=a__ ,eos_token_id=a__ ,**a__ )
a__ = vocab_size
a__ = max_position_embeddings
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_multiple_size
a__ = hidden_act
a__ = rotary_pct
a__ = rotary_emb_base
a__ = initializer_range
a__ = layer_norm_eps
a__ = use_cache
a__ = attention_dropout
a__ = hidden_dropout
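# Minimal sketch against the upstream class this snippet mirrors (the names here
# are obfuscated; the defaults match abeja/gpt-neox-japanese-2.7b):
#
#   from transformers import GPTNeoXJapaneseConfig
#   config = GPTNeoXJapaneseConfig()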
| 331 | 1 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :int = BioGptTokenizer
__snake_case :List[Any] = False
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
__lowercase = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
__lowercase = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(_lowerCAmelCase ) )
def _a ( self : List[str] , _lowerCAmelCase : Any ) -> Dict:
"""simple docstring"""
__lowercase = """lower newer"""
__lowercase = """lower newer"""
return input_text, output_text
def _a ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase = BioGptTokenizer(self.vocab_file , self.merges_file )
__lowercase = """lower"""
__lowercase = ["""low""", """er</w>"""]
__lowercase = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = tokens + ["""<unk>"""]
__lowercase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
@slow
def _a ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
__lowercase = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCAmelCase )
__lowercase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
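# The toy merges above use subword-nmt's "token_a token_b count" line format,
# which is why "lower" splits into ["low", "er</w>"] in the tokenize test.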
| 53 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCamelCase : Any = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
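# With the lazy module installed in sys.modules, user imports stay cheap: the
# torch-backed symbols are only resolved on first attribute access, e.g.
#
#   from transformers import MraConfig, MraModel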
| 53 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences( sequence: list[Any] ) -> None:
    """simple docstring"""
    create_state_space_tree(sequence, [], 0 )
def create_state_space_tree( sequence: list[Any], current_subsequence: list[Any], index: int ) -> None:
    """simple docstring"""
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence, current_subsequence, index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence, current_subsequence, index + 1 )
    current_subsequence.pop()
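# Each index branches on "skip element" then "take element", so the recursion
# prints all 2**n subsequences of an n-element sequence.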
if __name__ == "__main__":
_a = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
| 19 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : str = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class a_ ( SCREAMING_SNAKE_CASE__ ):
A = '''mobilenet_v2'''
def __init__( self , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=224 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=8 , SCREAMING_SNAKE_CASE=8 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="relu6" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0.8 , SCREAMING_SNAKE_CASE=0.0_2 , SCREAMING_SNAKE_CASE=0.0_0_1 , SCREAMING_SNAKE_CASE=255 , **SCREAMING_SNAKE_CASE , ) -> Dict:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = depth_multiplier
SCREAMING_SNAKE_CASE_ = depth_divisible_by
SCREAMING_SNAKE_CASE_ = min_depth
SCREAMING_SNAKE_CASE_ = expand_ratio
SCREAMING_SNAKE_CASE_ = output_stride
SCREAMING_SNAKE_CASE_ = first_layer_is_expansion
SCREAMING_SNAKE_CASE_ = finegrained_output
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = tf_padding
SCREAMING_SNAKE_CASE_ = classifier_dropout_prob
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = semantic_loss_ignore_index
class a_ ( SCREAMING_SNAKE_CASE__ ):
A = version.parse('''1.11''' )
@property
def A_( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def A_( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def A_( self ) -> float:
"""simple docstring"""
return 1e-4
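# Sketch: the OnnxConfig above is what the (now legacy) transformers.onnx CLI
# consumes when exporting, e.g.
#
#   python -m transformers.onnx --model=google/mobilenet_v2_1.0_224 onnx/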
| 205 | 0 |
"""simple docstring"""
from __future__ import annotations
def merge( input_list: list , low: int , mid: int , high: int ) -> list:
    """Merge the sorted runs input_list[low:mid] and input_list[mid:high+1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0 ) )
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort( input_list: list ) -> list:
    if len(input_list ) <= 1:
        return input_list
    input_list = list(input_list )
    # iteration for two-way merging
    p = 2
    while p <= len(input_list ):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0 , len(input_list ) , p ):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list , low , mid , high )
        # final merge of last two parts
        if p * 2 >= len(input_list ):
            mid = i
            input_list = merge(input_list , 0 , mid , len(input_list ) - 1 )
            break
        p *= 2
    return input_list
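# Bottom-up variant: the run width ``p`` doubles each pass, giving O(log n)
# passes of O(n) merging, i.e. O(n log n) overall with no recursion.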
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = input("""Enter numbers separated by a comma:\n""").strip()
if user_input == "":
_SCREAMING_SNAKE_CASE = []
else:
_SCREAMING_SNAKE_CASE = [int(item.strip()) for item in user_input.split(""",""")]
print(iter_merge_sort(unsorted))
| 700 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_SCREAMING_SNAKE_CASE = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def __lowerCAmelCase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ) -> str:
_UpperCamelCase : str = state_dict.pop(__lowerCAmelCase )
_UpperCamelCase : Union[str, Any] = val
def __lowerCAmelCase ( __lowerCAmelCase : Optional[int] ) -> Optional[Any]:
_UpperCamelCase : str = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_UpperCamelCase : Optional[Any] = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
_UpperCamelCase : str = value
else:
_UpperCamelCase : str = value
return new_state_dict
def __lowerCAmelCase ( __lowerCAmelCase : str ) -> List[str]:
_UpperCamelCase : Any = ""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_UpperCamelCase : Any = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
_UpperCamelCase : List[str] = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase : Tuple = in_proj_weight[:256, :]
_UpperCamelCase : Dict = in_proj_bias[:256]
_UpperCamelCase : int = in_proj_weight[256:512, :]
_UpperCamelCase : Tuple = in_proj_bias[256:512]
_UpperCamelCase : str = in_proj_weight[-256:, :]
_UpperCamelCase : int = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_UpperCamelCase : int = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
_UpperCamelCase : Optional[int] = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase : List[Any] = in_proj_weight[:256, :]
_UpperCamelCase : str = in_proj_bias[:256]
_UpperCamelCase : Optional[Any] = in_proj_weight[256:512, :]
_UpperCamelCase : str = in_proj_bias[256:512]
_UpperCamelCase : Tuple = in_proj_weight[-256:, :]
_UpperCamelCase : Optional[Any] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_UpperCamelCase : Any = state_dict.pop(
f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" )
_UpperCamelCase : Tuple = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_UpperCamelCase : Optional[Any] = in_proj_weight_cross_attn[:256, :]
_UpperCamelCase : str = in_proj_bias_cross_attn[:256]
_UpperCamelCase : Dict = in_proj_weight_cross_attn[256:512, :]
_UpperCamelCase : int = in_proj_bias_cross_attn[256:512]
_UpperCamelCase : int = in_proj_weight_cross_attn[-256:, :]
_UpperCamelCase : Optional[Any] = in_proj_bias_cross_attn[-256:]
def __lowerCAmelCase ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> List[Any]:
_UpperCamelCase , _UpperCamelCase : List[str] = image.size
_UpperCamelCase : Dict = max(__lowerCAmelCase , __lowerCAmelCase )
_UpperCamelCase : Tuple = 800 if "detection" in checkpoint_url else 1000
_UpperCamelCase : Tuple = target_max_size / current_max_size
_UpperCamelCase : str = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def __lowerCAmelCase ( __lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
_UpperCamelCase : Optional[int] = F.to_tensor(__lowerCAmelCase )
_UpperCamelCase : Dict = F.normalize(__lowerCAmelCase , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def __lowerCAmelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] ) -> Tuple:
logger.info("Converting model..." )
# load original state dict
_UpperCamelCase : Tuple = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location="cpu" )
# rename keys
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCamelCase : List[str] = rename_backbone_keys(__lowerCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(__lowerCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_UpperCamelCase : List[Any] = "model."
for key in state_dict.copy().keys():
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_UpperCamelCase : Dict = state_dict.pop(__lowerCAmelCase )
_UpperCamelCase : Any = val
# create HuggingFace model and load state dict
_UpperCamelCase : str = TableTransformerConfig(
backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
_UpperCamelCase : List[str] = 15
_UpperCamelCase : Optional[Any] = 2
_UpperCamelCase : Optional[Any] = {0: "table", 1: "table rotated"}
_UpperCamelCase : Any = idalabel
_UpperCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
else:
_UpperCamelCase : Any = 125
_UpperCamelCase : List[str] = 6
_UpperCamelCase : str = {
0: "table",
1: "table column",
2: "table row",
3: "table column header",
4: "table projected row header",
5: "table spanning cell",
}
_UpperCamelCase : Optional[Any] = idalabel
_UpperCamelCase : str = {v: k for k, v in idalabel.items()}
_UpperCamelCase : str = DetrImageProcessor(
format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1000 )
_UpperCamelCase : Any = TableTransformerForObjectDetection(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# verify our conversion
_UpperCamelCase : str = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
_UpperCamelCase : Dict = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=__lowerCAmelCase )
_UpperCamelCase : Dict = Image.open(__lowerCAmelCase ).convert("RGB" )
_UpperCamelCase : List[Any] = normalize(resize(__lowerCAmelCase , __lowerCAmelCase ) ).unsqueeze(0 )
_UpperCamelCase : str = model(__lowerCAmelCase )
if "detection" in checkpoint_url:
_UpperCamelCase : Any = (1, 15, 3)
_UpperCamelCase : Optional[Any] = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
_UpperCamelCase : Any = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
_UpperCamelCase : Any = (1, 125, 7)
_UpperCamelCase : List[Any] = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
_UpperCamelCase : Optional[Any] = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , __lowerCAmelCase , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __lowerCAmelCase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
image_processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
# Push model to HF hub
logger.info("Pushing model to the hub..." )
_UpperCamelCase : Any = (
"microsoft/table-transformer-detection"
if "detection" in checkpoint_url
else "microsoft/table-transformer-structure-recognition"
)
model.push_to_hub(__lowerCAmelCase )
image_processor.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
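# Example invocation (the script filename and output directory are illustrative):
#
#   python convert_table_transformer_checkpoint.py \
#       --pytorch_dump_folder_path ./table-transformer-detection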
| 239 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def a ( snake_case__: List[Any] , snake_case__: Optional[int]=False , snake_case__: Union[str, Any]=False , snake_case__: str=False ):
'''simple docstring'''
lowercase_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def a ( snake_case__: Any , snake_case__: Union[str, Any] ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
lowercase_ = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.weight''' )
lowercase_ = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase_ = in_proj_weight[
: config.hidden_size, :
]
lowercase_ = in_proj_bias[: config.hidden_size]
lowercase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ = in_proj_bias[-config.hidden_size :]
def a ( snake_case__: Any ):
'''simple docstring'''
lowercase_ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def a ( snake_case__: Optional[int] , snake_case__: List[Any] , snake_case__: Any ):
'''simple docstring'''
lowercase_ = dct.pop(snake_case__ )
lowercase_ = val
@torch.no_grad()
def a ( snake_case__: Any , snake_case__: Union[str, Any] ):
'''simple docstring'''
lowercase_ = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=snake_case__ )
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
if "vqa" in checkpoint_url:
lowercase_ = True
lowercase_ = 3_129
lowercase_ = '''huggingface/label-files'''
lowercase_ = '''vqa2-id2label.json'''
lowercase_ = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) )
lowercase_ = {int(snake_case__ ): v for k, v in idalabel.items()}
lowercase_ = idalabel
lowercase_ = {v: k for k, v in idalabel.items()}
lowercase_ = ViltForQuestionAnswering(snake_case__ )
elif "nlvr" in checkpoint_url:
lowercase_ = True
lowercase_ = 2
lowercase_ = {0: '''False''', 1: '''True'''}
lowercase_ = {v: k for k, v in config.idalabel.items()}
lowercase_ = 3
lowercase_ = ViltForImagesAndTextClassification(snake_case__ )
elif "irtr" in checkpoint_url:
lowercase_ = True
lowercase_ = ViltForImageAndTextRetrieval(snake_case__ )
elif "mlm_itm" in checkpoint_url:
lowercase_ = True
lowercase_ = ViltForMaskedLM(snake_case__ )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
lowercase_ = torch.hub.load_state_dict_from_url(snake_case__ , map_location='''cpu''' )['''state_dict''']
lowercase_ = create_rename_keys(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
read_in_q_k_v(snake_case__ , snake_case__ )
if mlm_model or irtr_model:
lowercase_ = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowercase_ , lowercase_ = model.load_state_dict(snake_case__ , strict=snake_case__ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(snake_case__ )
# Define processor
lowercase_ = ViltImageProcessor(size=384 )
lowercase_ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase_ = ViltProcessor(snake_case__ , snake_case__ )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowercase_ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=snake_case__ ).raw )
lowercase_ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=snake_case__ ).raw )
lowercase_ = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
lowercase_ = processor(snake_case__ , snake_case__ , return_tensors='''pt''' )
lowercase_ = processor(snake_case__ , snake_case__ , return_tensors='''pt''' )
lowercase_ = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowercase_ = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=snake_case__ ).raw )
if mlm_model:
lowercase_ = '''a bunch of [MASK] laying on a [MASK].'''
else:
lowercase_ = '''How many cats are there?'''
lowercase_ = processor(snake_case__ , snake_case__ , return_tensors='''pt''' )
lowercase_ = model(**snake_case__ )
# Verify outputs
if mlm_model:
lowercase_ = torch.Size([1, 11, 30_522] )
lowercase_ = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , snake_case__ , atol=1e-4 )
# verify masked token prediction equals "cats"
lowercase_ = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowercase_ = torch.Size([1, 3_129] )
lowercase_ = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
        # logits is 2-D here (batch_size, num_labels)
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 )
# verify vqa prediction equals "2"
lowercase_ = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowercase_ = torch.Size([1, 2] )
lowercase_ = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
assert torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__a = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
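# Example invocation (script filename and output path are illustrative):
#
#   python convert_vilt_checkpoint.py --pytorch_dump_folder_path ./vilt-b32-mlm-itm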
| 97 |
def sum_of_proper_divisors( num: int ) -> int:
    '''
    Return the sum of the proper divisors of ``num``.
    >>> sum_of_proper_divisors(6)
    6
    '''
    if not isinstance(num , int ):
        raise ValueError('''Input must be an integer''' )
    if num <= 0:
        raise ValueError('''Input must be positive''' )
    return sum(
        divisor for divisor in range(1 , num // 2 + 1 ) if num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
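# For readability, a de-obfuscated sketch of the same helper follows; the name
# `sum_of_proper_divisors` is illustrative, not from the original:
def sum_of_proper_divisors(input_num: int) -> int:
    # Sum every divisor of input_num up to input_num // 2 (all proper divisors),
    # e.g. 6 -> 1 + 2 + 3 = 6 and 12 -> 1 + 2 + 3 + 4 + 6 = 16.
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)

assert sum_of_proper_divisors(6) == 6   # 6 is a perfect number
assert sum_of_proper_divisors(12) == 16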
| 97 | 1 |
from manim import *
class A ( lowerCamelCase_ ):
def lowercase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_ = [mem.copy() for i in range(6 )]
UpperCamelCase_ = [mem.copy() for i in range(6 )]
UpperCamelCase_ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCamelCase_ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCamelCase_ = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCamelCase_ = Text('CPU' , font_size=24 )
UpperCamelCase_ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCAmelCase )
UpperCamelCase_ = [mem.copy() for i in range(4 )]
UpperCamelCase_ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCamelCase_ = Text('GPU' , font_size=24 )
UpperCamelCase_ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCAmelCase )
UpperCamelCase_ = [mem.copy() for i in range(6 )]
UpperCamelCase_ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCamelCase_ = Text('Model' , font_size=24 )
UpperCamelCase_ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCAmelCase )
UpperCamelCase_ = []
for i, rect in enumerate(__UpperCAmelCase ):
rect.set_stroke(__UpperCAmelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCamelCase_ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCAmelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
self.add(__UpperCAmelCase )
cpu_targs.append(__UpperCAmelCase )
UpperCamelCase_ = [mem.copy() for i in range(6 )]
UpperCamelCase_ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCamelCase_ = Text('Loaded Checkpoint' , font_size=24 )
UpperCamelCase_ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , aligned_edge=__UpperCAmelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCamelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_ = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
UpperCamelCase_ = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCamelCase_ = MarkupText(
f'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase ) , Write(__UpperCAmelCase ) )
self.play(Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )
UpperCamelCase_ = []
UpperCamelCase_ = []
for i, rect in enumerate(__UpperCAmelCase ):
UpperCamelCase_ = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
target.move_to(__UpperCAmelCase )
first_animations.append(GrowFromCenter(__UpperCAmelCase , run_time=1 ) )
UpperCamelCase_ = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
self.play(*__UpperCAmelCase )
self.play(*__UpperCAmelCase )
self.wait()
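# Rendering note (a hypothetical invocation, not part of the original file): a manim
# scene class like the one above is typically rendered from the command line, e.g.
#   manim -pql this_file.py A
# where -p previews the result and -ql selects low render quality for fast iteration.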
| 559 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__( self : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any]=13 , __UpperCAmelCase : str=7 , __UpperCAmelCase : Dict=True , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Any=99 , __UpperCAmelCase : Union[str, Any]=16 , __UpperCAmelCase : Union[str, Any]=36 , __UpperCAmelCase : Optional[int]=6 , __UpperCAmelCase : Union[str, Any]=6 , __UpperCAmelCase : List[str]=6 , __UpperCAmelCase : Union[str, Any]=37 , __UpperCAmelCase : Dict="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : List[Any]=16 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : Any=3 , __UpperCAmelCase : Optional[int]=4 , __UpperCAmelCase : Optional[Any]=None , ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_input_mask
UpperCamelCase_ = use_token_type_ids
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = embedding_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_hidden_groups
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_labels
UpperCamelCase_ = num_choices
UpperCamelCase_ = scope
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_input_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ = None
if self.use_token_type_ids:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase_ = None
UpperCamelCase_ = None
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowercase__ ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = AlbertModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
UpperCamelCase_ = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
UpperCamelCase_ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase__ ( self : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
UpperCamelCase_ = AlbertForPreTraining(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , sentence_order_label=__UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowercase__ ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = AlbertForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = AlbertForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = AlbertForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Any , __UpperCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = AlbertForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.num_choices
UpperCamelCase_ = AlbertForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCamelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.prepare_config_and_inputs()
        (UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_) = config_and_inputs
UpperCamelCase_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE : int = True
def lowercase__ ( self : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : int=False ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class in get_values(__UpperCAmelCase ):
UpperCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase )
UpperCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = AlbertModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase )
def lowercase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
def lowercase__ ( self : Any ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def lowercase__ ( self : int ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase_ = type
self.model_tester.create_and_check_model(*__UpperCAmelCase )
@slow
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = AlbertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = AlbertModel.from_pretrained('albert-base-v2' )
UpperCamelCase_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
UpperCamelCase_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
UpperCamelCase_ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __UpperCAmelCase )
UpperCamelCase_ = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1E-4 ) )
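# A minimal, self-contained sketch of the shape check the integration test above
# performs; the tiny configuration values here are illustrative, not albert-base-v2's:
import torch
from transformers import AlbertConfig, AlbertModel

tiny_config = AlbertConfig(
    vocab_size=100, hidden_size=36, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37
)
tiny_model = AlbertModel(tiny_config)
tiny_model.eval()
dummy_input_ids = torch.randint(0, 100, (1, 11))
with torch.no_grad():
    dummy_output = tiny_model(dummy_input_ids)
# last_hidden_state has shape (batch_size, sequence_length, hidden_size)
assert dummy_output.last_hidden_state.shape == torch.Size((1, 11, 36))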
| 559 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
__snake_case : Dict = list[list[float | int]]
def _lowercase ( lowerCamelCase__ : Matrix, lowerCamelCase__ : Matrix ):
_a = len(lowerCamelCase__ )
_a = [[0 for _ in range(size + 1 )] for _ in range(lowerCamelCase__ )]
_a = 42
_a = 42
_a = 42
_a = 42
_a = 42
_a = 42
for row in range(lowerCamelCase__ ):
for col in range(lowerCamelCase__ ):
_a = matrix[row][col]
_a = vector[row][0]
_a = 0
_a = 0
while row < size and col < size:
# pivoting
        _a = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowerCamelCase__, lowerCamelCase__ ) )[1]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_a , _a = augmented[pivot_row], augmented[row]
for rowa in range(row + 1, lowerCamelCase__ ):
_a = augmented[rowa][col] / augmented[row][col]
_a = 0
for cola in range(col + 1, size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1, lowerCamelCase__ ):
for row in range(lowerCamelCase__ ):
_a = augmented[row][col] / augmented[col][col]
for cola in range(lowerCamelCase__, size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row], 10 )] for row in range(lowerCamelCase__ )
]
def _lowercase ( lowerCamelCase__ : list[int] ):
_a = len(lowerCamelCase__ )
_a = [[0 for _ in range(lowerCamelCase__ )] for _ in range(lowerCamelCase__ )]
_a = [[0] for _ in range(lowerCamelCase__ )]
_a = 42
_a = 42
_a = 42
_a = 42
for x_val, y_val in enumerate(lowerCamelCase__ ):
for col in range(lowerCamelCase__ ):
_a = (x_val + 1) ** (size - col - 1)
_a = y_val
_a = solve(lowerCamelCase__, lowerCamelCase__ )
def interpolated_func(lowerCamelCase__ : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(lowerCamelCase__ ) )
return interpolated_func
def _lowercase ( lowerCamelCase__ : int ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def _lowercase ( lowerCamelCase__ : Callable[[int], int] = question_function, lowerCamelCase__ : int = 10 ):
_a = [func(lowerCamelCase__ ) for x_val in range(1, order + 1 )]
_a = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 )
]
_a = 0
_a = 42
_a = 42
for poly in polynomials:
_a = 1
while func(lowerCamelCase__ ) == poly(lowerCamelCase__ ):
x_val += 1
ret += poly(lowerCamelCase__ )
return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
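# A quick sanity check of the Gaussian-elimination helper above (an illustrative
# example, assuming the first helper is the one later referenced as `solve`):
# solve x + y = 3 and 2x - y = 0; the expected answer is x = 1, y = 2.
#
#   coefficient_matrix = [[1.0, 1.0], [2.0, -1.0]]
#   constants = [[3.0], [0.0]]
#   print(solve(coefficient_matrix, constants))  # [[1.0], [2.0]]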
| 131 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : int ):
if not isinstance(lowerCamelCase__, lowerCamelCase__ ):
raise TypeError("only integers accepted as input" )
else:
_a = str(abs(lowerCamelCase__ ) )
_a = [list(lowerCamelCase__ ) for char in range(len(lowerCamelCase__ ) )]
for index in range(len(lowerCamelCase__ ) ):
num_transpositions[index].pop(lowerCamelCase__ )
return max(
int("".join(list(lowerCamelCase__ ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 131 | 1 |
"""simple docstring"""
A__ : int = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
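# Consumption sketch (not part of the original table): a pin table like this is
# typically read by a version-lookup helper, along the lines of
#
#   def lookup_pin(package: str) -> str:
#       return deps[package]          # assuming the dict above is bound to `deps`
#
#   lookup_pin("torch")               # -> "torch>=1.9,!=1.12.0"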
| 272 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
A__ : Any = pytest.mark.integration
@require_faiss
class __magic_name__ ( SCREAMING_SNAKE_CASE__ ):
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowercase: str = Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(A_ ) for x in np.arange(30 ).tolist()]} )
return dset
def lowercase_ ( self ) -> int:
"""simple docstring"""
import faiss
_lowercase: Dataset = self._create_dummy_dataset()
_lowercase: List[str] = dset.map(
lambda A_ , A_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=A_ , keep_in_memory=A_ )
_lowercase: Dict = dset.add_faiss_index('''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
_lowercase , _lowercase: Union[str, Any] = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
dset.drop_index('''vecs''' )
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
import faiss
_lowercase: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
_lowercase , _lowercase: Dict = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
import faiss
_lowercase: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file:
dset.save_faiss_index('''vecs''' , tmp_file.name )
dset.load_faiss_index('''vecs2''' , tmp_file.name )
os.unlink(tmp_file.name )
_lowercase , _lowercase: Dict = dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def lowercase_ ( self ) -> int:
"""simple docstring"""
_lowercase: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' )
dset.drop_index('''vecs''' )
self.assertRaises(A_ , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.floataa ) ) )
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
from elasticsearch import Elasticsearch
_lowercase: Dataset = self._create_dummy_dataset()
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
_lowercase: List[Any] = {'''acknowledged''': True}
mocked_bulk.return_value([(True, None)] * 30 )
_lowercase: List[str] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}}
_lowercase: int = Elasticsearch()
dset.add_elasticsearch_index('''filename''' , es_client=A_ )
_lowercase , _lowercase: Dict = dset.get_nearest_examples('''filename''' , '''my_name-train_29''' )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
@require_faiss
class __magic_name__ ( SCREAMING_SNAKE_CASE__ ):
def lowercase_ ( self ) -> Any:
"""simple docstring"""
import faiss
_lowercase: str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
_lowercase: List[Any] = np.zeros(5 , dtype=np.floataa )
_lowercase: int = 1
_lowercase , _lowercase: Optional[Any] = index.search(A_ )
self.assertRaises(A_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
_lowercase: Tuple = np.eye(5 , dtype=np.floataa )[::-1]
_lowercase , _lowercase: str = index.search_batch(A_ )
self.assertRaises(A_ , index.search_batch , queries[0] )
_lowercase: Tuple = [scores[0] for scores in total_scores]
_lowercase: Union[str, Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , A_ )
def lowercase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
import faiss
_lowercase: Union[str, Any] = FaissIndex(string_factory='''Flat''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
_lowercase: Dict = FaissIndex(string_factory='''LSH''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(A_ ):
_lowercase: List[str] = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
import faiss
_lowercase: Any = faiss.IndexFlat(5 )
_lowercase: List[Any] = FaissIndex(custom_index=A_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def lowercase_ ( self ) -> str:
"""simple docstring"""
import faiss
_lowercase: Tuple = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file:
index.save(tmp_file.name )
_lowercase: Optional[Any] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
_lowercase: Optional[Any] = np.zeros(5 , dtype=np.floataa )
_lowercase: Union[str, Any] = 1
_lowercase , _lowercase: Tuple = index.search(A_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
import faiss
_lowercase: Dict = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
_lowercase: Tuple = '''index.faiss'''
_lowercase: str = f'''mock://{index_name}'''
index.save(_UpperCamelCase , storage_options=mockfs.storage_options )
_lowercase: List[Any] = FaissIndex.load(_UpperCamelCase , storage_options=mockfs.storage_options )
_lowercase: Union[str, Any] = np.zeros(5 , dtype=np.floataa )
_lowercase: Dict = 1
_lowercase , _lowercase: str = index.search(_UpperCamelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __magic_name__ ( SCREAMING_SNAKE_CASE__ ):
def lowercase_ ( self ) -> int:
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
_lowercase: int = Elasticsearch()
_lowercase: Tuple = {'''acknowledged''': True}
_lowercase: Tuple = ElasticSearchIndex(es_client=A_ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['''foo''', '''bar''', '''foobar'''] )
# single query
_lowercase: Dict = '''foo'''
_lowercase: Union[str, Any] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
_lowercase , _lowercase: Optional[Any] = index.search(A_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
_lowercase: Optional[int] = '''foo'''
_lowercase: Union[str, Any] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
_lowercase , _lowercase: List[Any] = index.search(A_ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
_lowercase: Union[str, Any] = ['''foo''', '''bar''', '''foobar''']
_lowercase: str = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
_lowercase , _lowercase: Optional[int] = index.search_batch(A_ )
_lowercase: Any = [scores[0] for scores in total_scores]
_lowercase: List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([1, 1, 1] , A_ )
# batched queries with timeout
_lowercase: List[str] = ['''foo''', '''bar''', '''foobar''']
_lowercase: Dict = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
_lowercase , _lowercase: Optional[int] = index.search_batch(A_ , request_timeout=30 )
_lowercase: Optional[Any] = [scores[0] for scores in total_scores]
_lowercase: Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([1, 1, 1] , A_ )
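# A minimal, runnable sketch of the inner-product search pattern the tests above
# exercise, using plain faiss without the `datasets` wrapper (all names illustrative):
import faiss
import numpy as np

demo_vectors = np.arange(25, dtype=np.float32).reshape(5, 5)  # row i has dot product 25*i + 10 with all-ones
demo_index = faiss.IndexFlatIP(5)                             # exhaustive inner-product index over 5-d vectors
demo_index.add(demo_vectors)
demo_scores, demo_ids = demo_index.search(np.ones((1, 5), dtype=np.float32), 1)
assert demo_ids[0, 0] == 4 and demo_scores[0, 0] == 110.0     # row 4 maximizes the inner product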
| 272 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
SCREAMING_SNAKE_CASE_: Union[str, Any] =16
SCREAMING_SNAKE_CASE_: List[str] =32
def lowerCAmelCase_ ( snake_case_ : Accelerator , snake_case_ : int = 16 ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = AutoTokenizer.from_pretrained("bert-base-cased" )
UpperCAmelCase_ = load_dataset("glue" , "mrpc" )
def tokenize_function(snake_case_ : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase_ = datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(snake_case_ : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase_ = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase_ = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase_ = 8
else:
UpperCAmelCase_ = None
return tokenizer.pad(
snake_case_ , padding="longest" , max_length=snake_case_ , pad_to_multiple_of=snake_case_ , return_tensors="pt" , )
# Instantiate dataloaders.
UpperCAmelCase_ = DataLoader(
tokenized_datasets["train"] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
UpperCAmelCase_ = DataLoader(
tokenized_datasets["validation"] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
SCREAMING_SNAKE_CASE_: Tuple =mocked_dataloaders # noqa: F811
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if os.environ.get("TESTING_MOCKED_DATALOADERS" , snake_case_ ) == "1":
UpperCAmelCase_ = 2
# New Code #
UpperCAmelCase_ = int(args.gradient_accumulation_steps )
# Initialize accelerator
UpperCAmelCase_ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=snake_case_ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ = config["lr"]
UpperCAmelCase_ = int(config["num_epochs"] )
UpperCAmelCase_ = int(config["seed"] )
UpperCAmelCase_ = int(config["batch_size"] )
UpperCAmelCase_ = evaluate.load("glue" , "mrpc" )
set_seed(snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ = get_dataloaders(snake_case_ , snake_case_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=snake_case_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase_ = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase_ = AdamW(params=model.parameters() , lr=snake_case_ )
# Instantiate scheduler
UpperCAmelCase_ = get_linear_schedule_with_warmup(
optimizer=snake_case_ , num_warmup_steps=1_00 , num_training_steps=(len(snake_case_ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Now we train the model
for epoch in range(snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(snake_case_ ):
UpperCAmelCase_ = model(**snake_case_ )
UpperCAmelCase_ = output.loss
accelerator.backward(snake_case_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ = model(**snake_case_ )
UpperCAmelCase_ = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=snake_case_ , references=snake_case_ , )
UpperCAmelCase_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , snake_case_ )
def lowerCAmelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=snake_case_ , default=snake_case_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=snake_case_ , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(snake_case_ , snake_case_ )
if __name__ == "__main__":
main()
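# A standalone, minimal sketch of the `accelerator.accumulate` pattern used above,
# with a dummy model and random data; every name here is illustrative, not from the
# original script:
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

sketch_accelerator = Accelerator(gradient_accumulation_steps=4)
sketch_model = torch.nn.Linear(8, 2)
sketch_optimizer = torch.optim.AdamW(sketch_model.parameters(), lr=1e-3)
sketch_loader = DataLoader(
    TensorDataset(torch.randn(64, 8), torch.randint(0, 2, (64,))), batch_size=8
)
sketch_model, sketch_optimizer, sketch_loader = sketch_accelerator.prepare(
    sketch_model, sketch_optimizer, sketch_loader
)
sketch_loss_fn = torch.nn.CrossEntropyLoss()
for sketch_x, sketch_y in sketch_loader:
    # Inside accumulate(), gradients are synced and optimizer.step() takes effect
    # only every 4th micro-batch; the other calls are deferred automatically.
    with sketch_accelerator.accumulate(sketch_model):
        sketch_loss = sketch_loss_fn(sketch_model(sketch_x), sketch_y)
        sketch_accelerator.backward(sketch_loss)
        sketch_optimizer.step()
        sketch_optimizer.zero_grad()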
| 78 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
return params[f"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="attention" ) -> int:
_a : int = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
_a : Dict = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
_a : Any = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
_a : int = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
_a : List[str] = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
_a : Dict = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
_a : List[str] = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
_a : int = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> Optional[Any]:
if split_mlp_wi:
_a : Dict = params[f"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
_a : Dict = params[f"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
_a : Optional[int] = (wi_a, wi_a)
else:
_a : Tuple = params[f"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
_a : Tuple = params[f"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
return params[f"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def __lowerCamelCase ( lowerCAmelCase_ , *, lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False ) -> Any:
_a : Dict = traverse_util.flatten_dict(variables['target'] )
_a : Tuple = {'/'.join(lowerCAmelCase_ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_a : int = 'encoder/encoder/mlp/wi_0/kernel' in old
print('Split MLP:' , lowerCAmelCase_ )
_a : str = collections.OrderedDict()
# Shared embeddings.
_a : List[str] = old['token_embedder/embedding']
# Encoder.
for i in range(lowerCAmelCase_ ):
# Block i, layer 0 (Self Attention).
_a : Any = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' , 'pre_attention_layer_norm' )
_a , _a , _a , _a : List[str] = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' , 'attention' )
_a : str = layer_norm
_a : Union[str, Any] = k.T
_a : Dict = o.T
_a : int = q.T
_a : List[str] = v.T
# Block i, layer 1 (MLP).
_a : Optional[int] = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' , 'pre_mlp_layer_norm' )
_a , _a : Any = tax_mlp_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' , lowerCAmelCase_ )
_a : Optional[Any] = layer_norm
if split_mlp_wi:
_a : Tuple = wi[0].T
_a : List[str] = wi[1].T
else:
_a : Union[str, Any] = wi.T
_a : Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_a : Any = tax_relpos_bias_lookup(
lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' ).T
_a : Optional[Any] = old['encoder/encoder_norm/scale']
if not scalable_attention:
_a : Union[str, Any] = tax_relpos_bias_lookup(
lowerCAmelCase_ , 0 , 'encoder' ).T
_a : Any = tax_relpos_bias_lookup(
lowerCAmelCase_ , 0 , 'decoder' ).T
if not is_encoder_only:
# Decoder.
for i in range(lowerCAmelCase_ ):
# Block i, layer 0 (Self Attention).
_a : Dict = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'pre_self_attention_layer_norm' )
_a , _a , _a , _a : Union[str, Any] = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'self_attention' )
_a : str = layer_norm
_a : List[Any] = k.T
_a : Union[str, Any] = o.T
_a : int = q.T
_a : List[Any] = v.T
# Block i, layer 1 (Cross Attention).
_a : List[Any] = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'pre_cross_attention_layer_norm' )
_a , _a , _a , _a : Optional[int] = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'encoder_decoder_attention' )
_a : str = layer_norm
_a : Union[str, Any] = k.T
_a : Union[str, Any] = o.T
_a : Any = q.T
_a : str = v.T
# Block i, layer 2 (MLP).
_a : str = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'pre_mlp_layer_norm' )
_a , _a : int = tax_mlp_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , lowerCAmelCase_ )
_a : List[str] = layer_norm
if split_mlp_wi:
_a : List[str] = wi[0].T
_a : Union[str, Any] = wi[1].T
else:
_a : Optional[Any] = wi.T
_a : List[str] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_a : int = tax_relpos_bias_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' ).T
_a : List[Any] = old['decoder/decoder_norm/scale']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_a : List[Any] = old['decoder/logits_dense/kernel'].T
return new
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_a : List[str] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_a : Tuple = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_a : Union[str, Any] = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.' )
_a : Optional[Any] = state_dict['shared.weight']
return state_dict
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
_a : Any = checkpoints.load_tax_checkpoint(lowerCAmelCase_ )
_a : str = convert_tax_to_pytorch(
lowerCAmelCase_ , num_layers=config.num_layers , is_encoder_only=lowerCAmelCase_ , scalable_attention=lowerCAmelCase_ )
_a : Dict = make_state_dict(lowerCAmelCase_ , lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = False , ) -> str:
_a : int = MTaConfig.from_json_file(lowerCAmelCase_ )
print(f"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_a : Any = UMTaEncoderModel(lowerCAmelCase_ )
else:
_a : Optional[int] = UMTaForConditionalGeneration(lowerCAmelCase_ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowerCAmelCase_ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowerCAmelCase_ )
print('Done' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
__lowerCAmelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
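# Usage note (hypothetical paths, not from the original file): the converter above is
# driven from the command line, e.g.
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path ./umt5-converted \
#       --scalable_attention   # only for UMT5-style checkpoints with scaled attention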
| 358 | 0 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
lowercase__ : List[str] = logging.getLogger(__name__)
class UpperCamelCase__ ( a__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = "summarization"
_SCREAMING_SNAKE_CASE = ["loss"]
_SCREAMING_SNAKE_CASE = ROUGE_KEYS
_SCREAMING_SNAKE_CASE = "rouge2"
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : List[Any] ):
if hparams.sortish_sampler and hparams.gpus > 1:
lowerCAmelCase_ : Optional[Any] = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
super().__init__(lowerCamelCase_ , num_labels=lowerCamelCase_ , mode=self.mode , **lowerCamelCase_ )
use_task_specific_params(self.model , 'summarization' )
save_git_info(self.hparams.output_dir )
lowerCAmelCase_ : str = Path(self.output_dir ) / 'metrics.json'
lowerCAmelCase_ : Dict = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
lowerCAmelCase_ : int = 0
lowerCAmelCase_ : int = defaultdict(lowerCamelCase_ )
lowerCAmelCase_ : Tuple = self.config.model_type
lowerCAmelCase_ : str = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
lowerCAmelCase_ : List[str] = {
'data_dir': self.hparams.data_dir,
'max_source_length': self.hparams.max_source_length,
'prefix': self.model.config.prefix or '',
}
lowerCAmelCase_ : str = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
lowerCAmelCase_ : Tuple = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
lowerCAmelCase_ : Union[str, Any] = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F"target_lens: {self.target_lens}"
assert self.target_lens["train"] <= self.target_lens["test"], F"target_lens: {self.target_lens}"
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
lowerCAmelCase_ : Any = get_git_info()['repo_sha']
lowerCAmelCase_ : Optional[int] = hparams.num_workers
lowerCAmelCase_ : str = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowerCamelCase_ ):
lowerCAmelCase_ : List[str] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
lowerCAmelCase_ : Dict = self.decoder_start_token_id
lowerCAmelCase_ : List[Any] = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
)
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : Optional[Any] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
lowerCAmelCase_ : List[str] = self.hparams.eval_max_gen_length
else:
lowerCAmelCase_ : Optional[Any] = self.model.config.max_length
lowerCAmelCase_ : List[str] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : str ):
lowerCAmelCase_ : Tuple = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(lowerCamelCase_ , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
lowerCAmelCase_ : str = True
return readable_batch
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Any ):
return self.model(lowerCamelCase_ , **lowerCamelCase_ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
lowerCAmelCase_ : List[Any] = self.tokenizer.batch_decode(
lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ )
return lmap(str.strip , lowerCamelCase_ )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : str ):
lowerCAmelCase_ : Tuple = self.tokenizer.pad_token_id
lowerCAmelCase_ ,lowerCAmelCase_ : Union[str, Any] = batch['input_ids'], batch['attention_mask']
lowerCAmelCase_ : List[str] = batch['labels']
if isinstance(self.model , lowerCamelCase_ ):
lowerCAmelCase_ : List[Any] = self.model._shift_right(lowerCamelCase_ )
else:
lowerCAmelCase_ : Union[str, Any] = shift_tokens_right(lowerCamelCase_ , lowerCamelCase_ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
lowerCAmelCase_ : List[Any] = decoder_input_ids
self.save_readable_batch(lowerCamelCase_ )
lowerCAmelCase_ : Tuple = self(lowerCamelCase_ , attention_mask=lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ , use_cache=lowerCamelCase_ )
lowerCAmelCase_ : Any = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
lowerCAmelCase_ : Dict = nn.CrossEntropyLoss(ignore_index=lowerCamelCase_ )
assert lm_logits.shape[-1] == self.vocab_size
lowerCAmelCase_ : Optional[int] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
lowerCAmelCase_ : List[Any] = nn.functional.log_softmax(lowerCamelCase_ , dim=-1 )
lowerCAmelCase_ ,lowerCAmelCase_ : str = label_smoothed_nll_loss(
lowerCamelCase_ , lowerCamelCase_ , self.hparams.label_smoothing , ignore_index=lowerCamelCase_ )
return (loss,)
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
return self.tokenizer.pad_token_id
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCAmelCase_ : Tuple = self._step(lowerCamelCase_ )
lowerCAmelCase_ : int = dict(zip(self.loss_names , lowerCamelCase_ ) )
# tokens per batch
lowerCAmelCase_ : int = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
lowerCAmelCase_ : Optional[int] = batch['input_ids'].shape[0]
lowerCAmelCase_ : int = batch['input_ids'].eq(self.pad ).sum()
lowerCAmelCase_ : Optional[int] = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str ):
return self._generative_step(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str]="val" ):
self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
        loss = losses['loss']
        generative_metrics = {
            k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor = torch.tensor(metric_val ).type_as(loss )
        generative_metrics.update({k: v.item() for k, v in losses.items()} )
        losses.update(generative_metrics )
        all_metrics = {F"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics ) # callback writes this to self.metrics_save_path
        preds = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F"{prefix}_loss": loss,
F"{prefix}_{self.val_metric}": metric_tensor,
}
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple ):
return calculate_rouge(lowerCamelCase_ , lowerCamelCase_ )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] ):
        ta = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=True , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
        gen_time = (time.time() - ta) / batch['input_ids'].shape[0]
        preds = self.ids_to_clean_text(generated_ids )
        target = self.ids_to_clean_text(batch['labels'] )
        loss_tensors = self._step(batch )
        base_metrics = dict(zip(self.loss_names , loss_tensors ) )
        rouge = self.calc_generative_metrics(preds , target )
        summ_len = np.mean(lmap(len , generated_ids ) )
        base_metrics.update(gen_time=gen_time , gen_len=summ_len , preds=preds , target=target , **rouge )
        return base_metrics
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] ):
return self._generative_step(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] ):
return self.validation_epoch_end(lowerCamelCase_ , prefix='test' )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] ):
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer , type_path=type_path , n_obs=n_obs , max_target_length=max_target_length , **self.dataset_kwargs , )
return dataset
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str = False ):
        dataset = self.get_dataset(type_path )
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sortish_sampler = dataset.make_sortish_sampler(batch_size , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                dataset , batch_size=batch_size , collate_fn=dataset.collate_fn , shuffle=False , num_workers=self.num_workers , sampler=sortish_sampler , )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                dataset , batch_sampler=batch_sampler , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
        else:
            return DataLoader(
                dataset , batch_size=batch_size , collate_fn=dataset.collate_fn , shuffle=shuffle , num_workers=self.num_workers , sampler=None , )
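    # Note on the three branches above: the sortish sampler (training only) buckets
    # examples of similar length to cut padding waste while keeping some shuffling;
    # the dynamic sampler instead packs variable-size batches capped at
    # hparams.max_tokens_per_batch tokens; otherwise a plain DataLoader with the
    # dataset's collate_fn is used.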
def SCREAMING_SNAKE_CASE__ ( self : Any ):
        dataloader = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=True )
return dataloader
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def SCREAMING_SNAKE_CASE__ ( self : str ):
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
        BaseTransformer.add_model_specific_args(parser , root_dir )
        add_generic_args(parser , root_dir )
        parser.add_argument(
            '--max_source_length' , default=1024 , type=int , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--max_target_length' , default=56 , type=int , help=(
                'The maximum total target sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--val_max_target_length' , default=142 , type=int , help=(
                'The maximum total target sequence length for validation. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--test_max_target_length' , default=142 , type=int , help=(
                'The maximum total target sequence length for testing. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument('--freeze_encoder' , action='store_true' )
        parser.add_argument('--freeze_embeds' , action='store_true' )
        parser.add_argument('--sortish_sampler' , action='store_true' , default=False )
        parser.add_argument('--overwrite_output_dir' , action='store_true' , default=False )
        parser.add_argument('--max_tokens_per_batch' , type=int , default=None )
        parser.add_argument('--logger_name' , type=str , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
        parser.add_argument('--n_train' , type=int , default=-1 , required=False , help='# examples. -1 means use all.' )
        parser.add_argument('--n_val' , type=int , default=500 , required=False , help='# examples. -1 means use all.' )
        parser.add_argument('--n_test' , type=int , default=-1 , required=False , help='# examples. -1 means use all.' )
        parser.add_argument(
            '--task' , type=str , default='summarization' , required=False , help='Task to run: summarization or translation.' )
        parser.add_argument('--label_smoothing' , type=float , default=0.0 , required=False )
        parser.add_argument('--src_lang' , type=str , default='' , required=False )
        parser.add_argument('--tgt_lang' , type=str , default='' , required=False )
        parser.add_argument('--eval_beams' , type=int , default=None , required=False )
        parser.add_argument(
            '--val_metric' , type=str , default=None , required=False , choices=['bleu', 'rouge2', 'loss', None] )
        parser.add_argument('--eval_max_gen_length' , type=int , default=None , help='never generate more than n tokens' )
        parser.add_argument('--save_top_k' , type=int , default=1 , required=False , help='How many checkpoints to save' )
        parser.add_argument(
            '--early_stopping_patience' , type=int , default=-1 , required=False , help=(
                '-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs, so'
                ' val_check_interval will affect it.'
            ) , )
return parser
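

# The two seq2seq helpers leaned on by _step above are imported from the example's
# utils module. The reference sketches below are hedged reconstructions: they follow
# the fairseq-style implementations this trainer is usually paired with, and carry a
# _sketch_ prefix to make clear they are illustrative, not the project's actual utils.
def _sketch_shift_tokens_right(input_ids, pad_token_id):
    """Wrap the last non-pad token (usually <eos>) to position 0 and shift everything else right."""
    prev_output_tokens = input_ids.clone()
    index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()
    prev_output_tokens[:, 1:] = input_ids[:, :-1]
    return prev_output_tokens


def _sketch_label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    """Label-smoothed NLL: loss = (1 - eps) * nll_loss + (eps / vocab_size) * smooth_loss."""
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is not None:
        # ignored (padding) positions contribute nothing to either term
        pad_mask = target.eq(ignore_index)
        nll_loss.masked_fill_(pad_mask, 0.0)
        smooth_loss.masked_fill_(pad_mask, 0.0)
    nll_loss = nll_loss.sum()
    smooth_loss = smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss
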
class TranslationModule( SummarizationModule ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = "translation"
_SCREAMING_SNAKE_CASE = ["loss"]
_SCREAMING_SNAKE_CASE = ["bleu"]
_SCREAMING_SNAKE_CASE = "bleu"
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
super().__init__(lowerCamelCase_ , **lowerCamelCase_ )
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
return calculate_bleu(lowerCamelCase_ , lowerCamelCase_ )
def main(args , model=None ) -> SummarizationModule:
    """simple docstring"""
    Path(args.output_dir ).mkdir(exist_ok=True )
    check_output_dir(args , expected_items=3 )
    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args )
        else:
            model = TranslationModule(args )
    dataset = Path(args.data_dir ).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith('/tmp' )
        or str(args.output_dir ).startswith('/var' )
    ):
        logger = True # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger
        project = os.environ.get('WANDB_PROJECT' , dataset )
        logger = WandbLogger(name=model.output_dir.name , project=project )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger
        logger = WandbLogger(name=model.output_dir.name , project=f"hf_{dataset}" )
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
    else:
        es_callback = False
    lower_is_better = args.val_metric == 'loss'
    trainer = generic_train(
        model , args , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
            args.output_dir , model.val_metric , args.save_top_k , lower_is_better ) , early_stopping_callback=es_callback , logger=logger , )
    pickle_save(model.hparams , model.output_dir / 'hparams.pkl' )
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = ''
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt' ) , recursive=True ) )
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams )
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
| 710 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any=7 , SCREAMING_SNAKE_CASE_ : Tuple=3 , SCREAMING_SNAKE_CASE_ : str=3_0 , SCREAMING_SNAKE_CASE_ : List[Any]=4_0_0 , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Tuple=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_ : Optional[int]=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : int=1 / 2_5_5 , SCREAMING_SNAKE_CASE_ : str=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
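
    # Worked example for get_expected_values above: with shortest_edge=18 and a
    # 30x400 (h x w) input, w > h, so expected_height = 18 and
    # expected_width = int(18 * 400 / 30) = 240 before any batch padding.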
@require_torch
@require_vision
class UpperCamelCase__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self : str ):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self : str ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
lowerCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'image_mean' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'image_std' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_normalize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_resize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_rescale' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'do_pad' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'size' ) )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
        self.assertEqual(image_processor.do_pad , False )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
pass
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
@slow
def SCREAMING_SNAKE_CASE__ ( self : int ):
# prepare image and target
lowerCAmelCase_ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
lowerCAmelCase_ : Dict = json.loads(f.read() )
lowerCAmelCase_ : Any = {'image_id': 3_9_7_6_9, 'annotations': target}
# encode them
lowerCAmelCase_ : Tuple = DeformableDetrImageProcessor()
lowerCAmelCase_ : List[Any] = image_processing(images=SCREAMING_SNAKE_CASE_ , annotations=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
# verify pixel values
lowerCAmelCase_ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
# verify area
lowerCAmelCase_ : int = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE_ ) )
# verify boxes
lowerCAmelCase_ : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
# verify image_id
lowerCAmelCase_ : Union[str, Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE_ ) )
# verify is_crowd
lowerCAmelCase_ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE_ ) )
# verify class_labels
lowerCAmelCase_ : Any = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE_ ) )
# verify orig_size
lowerCAmelCase_ : Optional[int] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE_ ) )
# verify size
lowerCAmelCase_ : str = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE_ ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
# prepare image, target and masks_path
lowerCAmelCase_ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
lowerCAmelCase_ : Optional[Any] = json.loads(f.read() )
lowerCAmelCase_ : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
lowerCAmelCase_ : List[str] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
lowerCAmelCase_ : Any = DeformableDetrImageProcessor(format='coco_panoptic' )
lowerCAmelCase_ : int = image_processing(images=SCREAMING_SNAKE_CASE_ , annotations=SCREAMING_SNAKE_CASE_ , masks_path=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
# verify pixel values
lowerCAmelCase_ : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
# verify area
lowerCAmelCase_ : Dict = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE_ ) )
# verify boxes
lowerCAmelCase_ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
# verify image_id
lowerCAmelCase_ : Union[str, Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE_ ) )
# verify is_crowd
lowerCAmelCase_ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE_ ) )
# verify class_labels
lowerCAmelCase_ : int = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE_ ) )
# verify masks
lowerCAmelCase_ : int = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , SCREAMING_SNAKE_CASE_ )
# verify orig_size
lowerCAmelCase_ : Union[str, Any] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE_ ) )
# verify size
lowerCAmelCase_ : str = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE_ ) )
| 317 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
])
class _snake_case ( unittest.TestCase):
def A__ ( self : Union[str, Any] ):
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split(), encoding="utf-8", check=a__, )
assert hasattr(self, "env" )
def A__ ( self : Any, __lowercase : Dict ):
# configuration for running training on smdistributed Model Parallel
        mpi_options = {
"enabled": True,
"processes_per_host": 8,
}
        smp_options = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
lowercase__ = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
lowercase__ = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''', instance_count=a__, instance_type=self.instance_type, debugger_hook_config=a__, hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 500,
}, metric_definitions=self.env.metric_definitions, distribution=a__, py_version="py36", )
def A__ ( self : Tuple, __lowercase : List[Any] ):
TrainingJobAnalytics(a__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def A__ ( self : Dict, __lowercase : List[str] ):
# create estimator
        estimator = self.create_estimator(a__ )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds", 999999 )
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''', "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, a__ )
| 413 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    """simple docstring"""
    return x + 2
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {"x": 3} )
        code = "x = y"
        state = {"y": 5}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {"x": 5, "y": 5} )
    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code , {"add_two": add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {"x": 3, "y": 5} )
        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code , {} , state=state )
        assert result is None
        assert "tried to execute add_two" in out.out
    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {"x": 3} )
    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code , {"add_two": add_two} , state=state )
        self.assertDictEqual(result , {"x": 3, "y": 5} )
        self.assertDictEqual(state , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {"x": 3, "y": 5} )
    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state , {"x": 3, "text": "This is x: 3."} )
    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state , {"x": 3, "y": 2} )
        state = {"x": 8}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {"x": 8, "y": 5} )
    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code , {"add_two": add_two} , state=state )
        self.assertListEqual(result , [3, 5] )
        self.assertDictEqual(state , {"x": 3, "test_list": [3, 5]} )
    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {"x": 3, "y": 3} )
    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code , {"add_two": add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {"x": 3, "test_list": [3, 5]} )
        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code , {"add_two": add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code , {"range": range} , state=state )
        assert result == 2
        self.assertDictEqual(state , {"x": 2, "i": 2} )
| 331 | 0 |
"""simple docstring"""
def lowerCAmelCase_ ( UpperCamelCase__ : List[Any] = 1000000 ):
"""simple docstring"""
__lowercase = 1
__lowercase = 1
__lowercase = {1: 1}
for inputa in range(2 , _lowerCAmelCase ):
__lowercase = 0
__lowercase = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
__lowercase = (3 * number) + 1
counter += 1
if inputa not in counters:
__lowercase = counter
if counter > pre_counter:
__lowercase = inputa
__lowercase = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
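# Known check value (hedged, from Project Euler 14): below one million the longest
# Collatz chain starts at 837799, so `solution(1000000) == 837799`.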
| 701 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
UpperCAmelCase__ =get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class lowerCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
super().setUp()
__lowercase = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
__lowercase = dict(zip(A_ , range(len(A_ ) ) ) )
        save_dir = Path(self.tmpdirname )
save_json(A_ , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(A_ , save_dir / VOCAB_FILES_NAMES["""spm_file"""] )
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , **A_ : List[Any] ):
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **A_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , A_ : Optional[int] ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
__lowercase = """</s>"""
__lowercase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<s>""" )
        self.assertEqual(len(vocab_keys ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("""Skip this test while all models are still to be uploaded.""" )
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [2, 3, 4, 5, 6] , )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(back_tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        text = tokenizer.convert_tokens_to_string(tokens )
        self.assertEqual(text , """This is a test""" )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
__lowercase = {"""input_ids""": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name="""facebook/m2m100_418M""" , revision="""c168bae485c864188cf9aa0e4108b0b6934dc91e""" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
a : Optional[int] = """facebook/m2m100_418M"""
a : str = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
a : int = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
a : Optional[int] = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str ):
'''simple docstring'''
__lowercase = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en""" , tgt_lang="""fr""" )
__lowercase = 1
return cls
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("""ar""" ) , 1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id("""en""" ) , 1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id("""ro""" ) , 1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id("""mr""" ) , 1_2_8_0_6_3 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
__lowercase = self.tokenizer.get_vocab()
self.assertEqual(len(A_ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["""<unk>"""] , 3 )
self.assertIn(self.tokenizer.get_lang_token("""en""" ) , A_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = """en"""
__lowercase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
self.assertIn(A_ , self.tokenizer.all_special_ids )
# fmt: off
__lowercase = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
__lowercase = self.tokenizer.decode(A_ , skip_special_tokens=A_ )
__lowercase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A_ )
self.assertEqual(A_ , A_ )
self.assertNotIn(self.tokenizer.eos_token , A_ )
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
__lowercase = tempfile.mkdtemp()
__lowercase = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(A_ )
__lowercase = MaMaaaTokenizer.from_pretrained(A_ )
self.assertDictEqual(new_tok.lang_token_to_id , A_ )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
__lowercase = """en"""
__lowercase = """fr"""
__lowercase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A_ , return_tensors="""pt""" )
__lowercase = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
__lowercase = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
__lowercase = """mr"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
__lowercase = """zh"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = """mr"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
__lowercase = """zh"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
__lowercase = self.tokenizer._build_translation_inputs("""A test""" , return_tensors="""pt""" , src_lang="""en""" , tgt_lang="""ar""" )
self.assertEqual(
nested_simplify(A_ ) , {
# en_XX, A, test, EOS
"""input_ids""": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 1_2_8_0_0_6,
} , )
| 442 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester( unittest.TestCase ):
def __init__(self : Any , a : Tuple , a : str=1_00 , a : List[str]=13 , a : Union[str, Any]=30 , a : Union[str, Any]=2 , a : Optional[Any]=3 , a : Union[str, Any]=True , a : Tuple=True , a : Any=32 , a : Union[str, Any]=5 , a : List[str]=4 , a : str=37 , a : Union[str, Any]="gelu" , a : List[Any]=0.1 , a : Optional[Any]=0.1 , a : Any=10 , a : Any=0.02 , a : Tuple=3 , ) -> Dict:
"""simple docstring"""
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
def _UpperCamelCase (self : Optional[int] ) -> Dict:
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def _UpperCamelCase (self : str , a : str , a : Optional[int] , a : str ) -> str:
"""simple docstring"""
A__ = FlaxBeitModel(config=a )
A__ = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase (self : Any , a : List[Any] , a : Tuple , a : str ) -> str:
"""simple docstring"""
A__ = FlaxBeitForMaskedImageModeling(config=a )
A__ = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _UpperCamelCase (self : Optional[Any] , a : Optional[int] , a : Any , a : str ) -> Any:
"""simple docstring"""
A__ = self.type_sequence_label_size
A__ = FlaxBeitForImageClassification(config=a )
A__ = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A__ = 1
A__ = FlaxBeitForImageClassification(a )
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ = model(a )
def _UpperCamelCase (self : Optional[int] ) -> int:
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
        A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class _snake_case( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
def _UpperCamelCase (self : List[str] ) -> None:
"""simple docstring"""
A__ = FlaxBeitModelTester(self )
A__ = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def _UpperCamelCase (self : str ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCamelCase (self : str ) -> Dict:
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(a )
A__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCamelCase (self : List[Any] ) -> Optional[int]:
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A__ = self._prepare_for_class(a , a )
A__ = model_class(a )
@jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )
with self.subTest('JIT Enabled' ):
A__ = model_jitted(**a ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
A__ = model_jitted(**a ).to_tuple()
self.assertEqual(len(a ) , len(a ) )
for jitted_output, output in zip(a , a ):
self.assertEqual(jitted_output.shape , output.shape )
def _UpperCamelCase (self : List[Any] ) -> Optional[int]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCamelCase (self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a )
def _UpperCamelCase (self : Dict ) -> Optional[int]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def _UpperCamelCase (self : List[Any] ) -> Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
A__ = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
A__ = model(np.ones((1, 3, 2_24, 2_24) ) )
self.assertIsNotNone(a )
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@require_flax
class _snake_case( unittest.TestCase ):
@cached_property
def _UpperCamelCase (self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def _UpperCamelCase (self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
A__ = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=a , return_tensors='np' ).pixel_values
# prepare bool_masked_pos
A__ = np.ones((1, 1_96) , dtype=a )
# forward pass
A__ = model(pixel_values=a , bool_masked_pos=a )
A__ = outputs.logits
# verify the logits
A__ = (1, 1_96, 81_92)
self.assertEqual(logits.shape , a )
A__ = np.array(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , a , atol=1e-2 ) )
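        # The (1, 196) bool_masked_pos above matches the ViT patch grid:
        # (224 // 16) ** 2 == 196 patches for a 224x224 input with 16x16 patches,
        # and the 8192 logit dimension is the size of BEiT's visual-token codebook.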
@slow
def _UpperCamelCase (self : Any ) -> int:
"""simple docstring"""
A__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=a , return_tensors='np' )
# forward pass
A__ = model(**a )
A__ = outputs.logits
# verify the logits
A__ = (1, 10_00)
self.assertEqual(logits.shape , a )
A__ = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] , a , atol=1e-4 ) )
A__ = 2_81
self.assertEqual(logits.argmax(-1 ).item() , a )
@slow
def _UpperCamelCase (self : List[Any] ) -> Tuple:
"""simple docstring"""
A__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=a , return_tensors='np' )
# forward pass
A__ = model(**a )
A__ = outputs.logits
# verify the logits
A__ = (1, 2_18_41)
self.assertEqual(logits.shape , a )
A__ = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] , a , atol=1e-4 ) )
A__ = 23_96
self.assertEqual(logits.argmax(-1 ).item() , a )
| 531 |
'''simple docstring'''
import socket


def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port))
    sock.send(b'Hello server!')
    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)
    print('Successfully received the file')
    sock.close()
    print('Connection closed')


if __name__ == "__main__":
    main()
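

# For reference, a minimal matching server sketch (hypothetical: the original
# snippet ships only the client, so the file name and accept logic are assumptions):
def serve(filename='File_to_send', port=12312):
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    conn.recv(1024)  # consume the client's greeting
    with open(filename, 'rb') as in_file:
        chunk = in_file.read(1024)
        while chunk:
            conn.send(chunk)
            chunk = in_file.read(1024)
    conn.close()  # closing the connection signals EOF, ending the client's recv loop
    server.close()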
| 531 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
a__ : Union[str, Any] = {'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = ['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
a__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 715 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class a_ ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
"""simple docstring"""
    model_class = AutoencoderKL
    main_input_name = 'sample'
    base_precision = 1E-2
@property
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : List[Any] = 4
SCREAMING_SNAKE_CASE : List[Any] = 3
SCREAMING_SNAKE_CASE : int = (32, 32)
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCamelCase )
return {"sample": image}
@property
def __lowerCAmelCase ( self ) ->str:
return (3, 32, 32)
@property
def __lowerCAmelCase ( self ) ->Dict:
return (3, 32, 32)
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def __lowerCAmelCase ( self ) ->Dict:
pass
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def __lowerCAmelCase ( self ) ->Dict:
# enable deterministic behavior for gradient checkpointing
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_class(**_lowerCamelCase )
model.to(_lowerCamelCase )
assert not model.is_gradient_checkpointing and model.training
SCREAMING_SNAKE_CASE : Tuple = model(**_lowerCamelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn_like(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
SCREAMING_SNAKE_CASE : str = self.model_class(**_lowerCamelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(_lowerCamelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
SCREAMING_SNAKE_CASE : Any = model_a(**_lowerCamelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
SCREAMING_SNAKE_CASE : Tuple = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
SCREAMING_SNAKE_CASE : List[Any] = dict(model.named_parameters() )
SCREAMING_SNAKE_CASE : Optional[int] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)
    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
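    # Note: the expected latent shape above divides the spatial dimensions by 8,
    # matching the Stable Diffusion VAE's overall 8x spatial downsampling factor.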
| 333 | 0 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_xlm_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )
    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 571 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
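# Illustrative mapping produced by rename_keys (example key, not taken from a real checkpoint):
#   "module.encoder.patch_embed1.proj.weight" -> "glpn.encoder.patch_embeddings.0.proj.weight"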
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
UpperCamelCase = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
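# Example invocation (script and file names are placeholders, not from the original source):
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti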
| 590 | 0 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
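# Minimal usage sketch (the "data.json" path is a placeholder):
#   dataset = JsonDatasetReader("data.json").read()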
class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
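# Usage sketch (the output path is a placeholder):
#   JsonDatasetWriter(dataset, "out.jsonl", num_proc=2).write()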
| 472 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        # Example: with the default signals [2, 1, 2, -1] and [1, 2, 3, 4],
        # the circular convolution is [10.0, 10.0, 6.0, 14.0].
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
| 472 | 1 |
"""simple docstring"""
import cmath
import math
def lowercase_ ( _lowerCamelCase: float , _lowerCamelCase: float , _lowerCamelCase: float , _lowerCamelCase: float ) -> complex:
'''simple docstring'''
__lowerCamelCase : Any = math.radians(__A )
__lowerCamelCase : Union[str, Any] = math.radians(__A )
# Convert voltage and current to rectangular form
__lowerCamelCase : Tuple = cmath.rect(__A , __A )
__lowerCamelCase : List[str] = cmath.rect(__A , __A )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod() | 646 |
def xnor_gate(input_1: int, input_2: int) -> int:
    """
    XNOR gate: returns 1 when both inputs are equal, otherwise 0.
    """
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    """
    Tests the xnor_gate function against the full truth table.
    """
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 94 | 0 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )

        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
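# Usage sketch (the checkpoint is a common choice for this pipeline, not mandated here):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   classifier("one day I will see the world", candidate_labels=["travel", "cooking", "dancing"])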
| 347 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
    from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline

try:
    if not (
        is_torch_available()
        and is_transformers_available()
        and is_k_diffusion_available()
        and is_k_diffusion_version(">=", "0.0.12")
    ):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import *  # noqa F403
else:
    from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline

try:
    if not (is_transformers_available() and is_onnx_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_onnx_objects import *  # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline

if is_transformers_available() and is_flax_available():
    import flax

    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
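# Note on the guarded imports above: each try/except OptionalDependencyNotAvailable
# block degrades gracefully -- when an optional backend is missing, dummy placeholder
# objects (which raise an informative error on use) are exported instead of the real
# pipelines, so top-level `from diffusers import ...` statements keep working.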
| 347 | 1 |
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """
    Converts a 3d point to a 2d drawable point using a simple perspective projection.
    """
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """
    Rotates a point around the given axis ('x', 'y' or 'z') by the given angle.
    """
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
| 47 |
import os
import sys
import unittest
UpperCamelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

DUMMY_CONSTANT = "\n{0} = None\n"

DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n"

DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        # Blank-line layout of the expected autogenerated file reconstructed from
        # the standard `make fix-copies` output format.
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file) | 256 | 0
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    """Return the importable module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
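# Example (illustrative, not part of the original utility): for the file path
# "tests/models/bert/test_modeling_bert.py", get_module_path returns the
# importable dotted path "tests.models.bert.test_modeling_bert".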
def get_test_module(test_file):
    """Get the module object of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file with a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` of the test classes."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in which `model_class` appears in `all_model_classes`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes of the test classes in which `model_class` appears."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to their model tester classes."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to the test classes they appear in."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to their model tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make objects serializable by replacing classes with their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o | 708 |
"""simple docstring"""
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type)

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
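    # Shape note (illustrative, not in the original test): unsqueeze(1) followed
    # by expand(-1, num_choices, -1) turns the (batch_size, seq_length) input_ids
    # into (batch_size, num_choices, seq_length), e.g. (13, 7) -> (13, 4, 7) with
    # the defaults above, so every choice sees a copy of the same sequence.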
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    # Flag names restored to the upstream DeBERTa-v2 test defaults (True, then four False).
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}") | 66 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
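    # Worked example (illustrative, not in the original config): with the default
    # conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the property below
    # multiplies the strides together, giving 5 * 2**6 = 320 input samples per
    # output frame.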
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1) | 564 | """simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"

    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0

    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
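# Input format expected by main() below (illustrative note, not in the source):
# each line of the "correct" file is "file;class_name;test_name;correct_line",
# e.g. "tests/models/x/test_modeling_x.py;XModelTest;test_logits;expected = ..."
# (the example path and names here are hypothetical).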
def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename) | 564 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29_056,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 708 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 5_12,
"bert-large-uncased": 5_12,
"bert-base-cased": 5_12,
"bert-large-cased": 5_12,
"bert-base-multilingual-uncased": 5_12,
"bert-base-multilingual-cased": 5_12,
"bert-base-chinese": 5_12,
"bert-base-german-cased": 5_12,
"bert-large-uncased-whole-word-masking": 5_12,
"bert-large-cased-whole-word-masking": 5_12,
"bert-large-uncased-whole-word-masking-finetuned-squad": 5_12,
"bert-large-cased-whole-word-masking-finetuned-squad": 5_12,
"bert-base-cased-finetuned-mrpc": 5_12,
"bert-base-german-dbmdz-cased": 5_12,
"bert-base-german-dbmdz-uncased": 5_12,
"TurkuNLP/bert-base-finnish-cased-v1": 5_12,
"TurkuNLP/bert-base-finnish-uncased-v1": 5_12,
"wietsedv/bert-base-dutch-cased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
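    # Worked example (illustrative, not from the original file): for a pair of
    # sequences with 5 and 3 tokens, the mask above is len([CLS] + 5 + [SEP]) = 7
    # zeros for the first segment followed by len(3 + [SEP]) = 4 ones.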
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 190 | 0 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
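# Worked example (illustrative, not in the source): for a node at (x, y) = (0, 0)
# with goal (3, 4), dx = -3 and dy = -4, so the Manhattan heuristic gives
# abs(-3) + abs(-4) = 7 while the Euclidean one gives sqrt(9 + 16) = 5.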
class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    # Note: as in the source, only construction is timed here; call
    # bidir_astar.search() to actually compute the bidirectional path.
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 462 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1_536,
"""junnyu/roformer_chinese_base""": 1_536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 62 | 0 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
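# Worked example (Project Euler #6, illustrative): for n = 10 the sum of squares
# is 10 * 11 * 21 / 6 = 385 and the square of the sum is 55**2 = 3025, so
# solution(10) returns 3025 - 385 = 2640.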
if __name__ == "__main__":
print(f'{solution() = }')
| 702 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4)
        # clip_sample/set_alpha_to_one restored to the standard values used in
        # the diffusers test suite (the obfuscated source hid both).
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=1_28)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, hidden_act="gelu", projection_dim=5_12)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
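    # Note (illustrative): the inputs above default to "output_type": "pt"
    # (torch tensors); test_text_to_video_default_case below overrides this to
    # "np" before calling the pipeline so the slice check runs on numpy arrays.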
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3E-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1E-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5E-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5E-2
| 87 | 0 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
TEST_FLUSH = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
TEST_STRAIGHT = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
TEST_TYPES = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
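# Illustrative quick check outside pytest (a sketch; relies only on the
# PokerHand API exercised by the tests above):
if __name__ == "__main__":
    hand, other, expected = generate_random_hand()
    result = PokerHand(hand).compare_with(PokerHand(other))
    print(f"{hand} vs {other}: {result} (expected {expected})")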
| 56 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name
    return name
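# Illustrative behavior of rename_key (a sanity-check sketch, not part of the
# original script):
#   rename_key("encoder.patch_embed.proj.weight")
#       -> "swin.embeddings.patch_embeddings.projection.weight"
#   Keys containing "decoder" keep their names, matching the decoder that
#   SwinForMaskedImageModeling itself defines.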
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
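# Note (illustrative): the original checkpoint stores query/key/value as one
# fused "qkv" matrix; the split above carves it into the three separate
# projections the HF Swin block expects, with `dim` taken from all_head_size.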
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    print(outputs.keys())
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
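# Example invocation (illustrative; the script name and local paths are placeholders):
#   python convert_swin_simmim_checkpoint.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path ./simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-simmim-hf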
| 315 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
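# Note (illustrative): with this lazy layout, `import transformers.models.convbert`
# stays cheap; heavy submodules such as modeling_convbert are only imported when
# one of the names listed in _import_structure is first accessed.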
| 701 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
MAX_MODEL_INPUT_SIZES = {
'''facebook/s2t-small-librispeech-asr''': 1_0_2_4,
}
MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """Construct a Speech2Text tokenizer, backed by SentencePiece."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)
    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])
    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
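# Illustrative usage (a sketch; the checkpoint name comes from the map above):
#   tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tokenizer("hello world").input_ids
#   text = tokenizer.decode(ids, skip_special_tokens=True)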
| 287 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
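# Minimal sketch of instantiating the config with defaults (illustrative):
#   config = RealmConfig()
#   config.reader_beam_size, config.num_block_records  # -> (5, 13353718)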
| 622 |
def solution(length: int = 50) -> int:
    """Return the number of ways a row of `length` units can be filled with
    unit gaps and tiles of length 2, 3 and 4 (Project Euler problem 117)."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 210 | 0 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
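# Note (illustrative): checkpoint keys are routed three ways above -- conv
# feature-extractor weights, adapter/projection weights, and transformer
# weights renamed through MAPPING before set_recursively copies them over.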
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False
    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1_0_2_4, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=2_5_0_0_0_4, type=int, help='''`decoder_start_token_id` of model config''')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
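# Example invocation (illustrative; the script name and all paths are placeholders):
#   python convert_mbart_wav2vec2_checkpoint.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./dict.mbart50.txt \
#       --config_yaml_path ./config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50-hf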
| 157 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
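# Quick sketch of typical use (illustrative):
#   config = Data2VecAudioConfig()
#   config.inputs_to_logits_ratio  # 320, the product of the conv strides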
| 157 | 1 |