code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def _lowerCamelCase( a ):
__a = tf.convert_to_tensor(a )
__a = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def _lowerCamelCase( a ):
__a = tf.convert_to_tensor(a )
__a = tf.cast(math.pi , x.dtype )
__a = tf.cast(0.04_47_15 , x.dtype )
__a = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(a , 3 )) ))
return x * cdf
def _lowerCamelCase( a ):
__a = tf.convert_to_tensor(a )
return x * tf.tanh(tf.math.softplus(a ) )
def _lowerCamelCase( a ):
__a = tf.convert_to_tensor(a )
__a = tf.cast(0.04_47_15 , x.dtype )
__a = tf.cast(0.79_78_84_56_08 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def _lowerCamelCase( a ):
__a = tf.convert_to_tensor(a )
__a = tf.cast(1.7_02 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def _lowerCamelCase( a ):
return tf.clip_by_value(_gelu(a ) , -1_0 , 1_0 )
def _lowerCamelCase( a , a=-1 ):
__a , __a = tf.split(a , 2 , axis=a )
return a * tf.math.sigmoid(a )
if version.parse(tf.version.VERSION) >= version.parse("""2.4"""):
def _lowerCamelCase( a ):
return tf.keras.activations.gelu(a , approximate=a )
SCREAMING_SNAKE_CASE__:Dict = tf.keras.activations.gelu
SCREAMING_SNAKE_CASE__:Union[str, Any] = approximate_gelu_wrap
else:
SCREAMING_SNAKE_CASE__:Optional[Any] = _gelu
SCREAMING_SNAKE_CASE__:Tuple = _gelu_new
SCREAMING_SNAKE_CASE__:Any = {
"""gelu""": gelu,
"""gelu_10""": gelu_aa,
"""gelu_fast""": gelu_fast,
"""gelu_new""": gelu_new,
"""glu""": glu,
"""mish""": mish,
"""quick_gelu""": quick_gelu,
"""relu""": tf.keras.activations.relu,
"""sigmoid""": tf.keras.activations.sigmoid,
"""silu""": tf.keras.activations.swish,
"""swish""": tf.keras.activations.swish,
"""tanh""": tf.keras.activations.tanh,
}
def _lowerCamelCase( a ):
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
| 528 | """simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
SCREAMING_SNAKE_CASE__:Union[str, Any] = 2
class snake_case__ :
def __init__( self , *, # begin keyword-only arguments
lowerCamelCase="<s>" , lowerCamelCase="<pad>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase=None , ):
__a , __a , __a , __a = bos, unk, pad, eos
__a = []
__a = []
__a = {}
__a = self.add_symbol(lowerCamelCase )
__a = self.add_symbol(lowerCamelCase )
__a = self.add_symbol(lowerCamelCase )
__a = self.add_symbol(lowerCamelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(lowerCamelCase )
__a = len(self.symbols )
def __eq__( self , lowerCamelCase ):
return self.indices == other.indices
def __getitem__( self , lowerCamelCase ):
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ):
return len(self.symbols )
def __contains__( self , lowerCamelCase ):
return sym in self.indices
@classmethod
def a__ ( cls , lowerCamelCase ):
__a = cls()
d.add_from_file(lowerCamelCase )
return d
def a__ ( self , lowerCamelCase , lowerCamelCase=1 , lowerCamelCase=False ):
if word in self.indices and not overwrite:
__a = self.indices[word]
__a = self.count[idx] + n
return idx
else:
__a = len(self.symbols )
__a = idx
self.symbols.append(lowerCamelCase )
self.count.append(lowerCamelCase )
return idx
def a__ ( self , lowerCamelCase ):
return 0
def a__ ( self , lowerCamelCase ):
if isinstance(lowerCamelCase , lowerCamelCase ):
try:
with open(lowerCamelCase , "r" , encoding="utf-8" ) as fd:
self.add_from_file(lowerCamelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(lowerCamelCase ) )
return
__a = f.readlines()
__a = self._load_meta(lowerCamelCase )
for line in lines[indices_start_line:]:
try:
__a , __a = line.rstrip().rsplit(" " , 1 )
if field == "#fairseq:overwrite":
__a = True
__a , __a = line.rsplit(" " , 1 )
else:
__a = False
__a = int(lowerCamelCase )
__a = line
if word in self and not overwrite:
raise RuntimeError(
"Duplicate word found when loading Dictionary: '{}'. "
"Duplicate words can overwrite earlier ones by adding the "
"#fairseq:overwrite flag at the end of the corresponding row "
"in the dictionary file. If using the Camembert model, please "
"download an updated copy of the model file.".format(lowerCamelCase ) )
self.add_symbol(lowerCamelCase , n=lowerCamelCase , overwrite=lowerCamelCase )
except ValueError:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" )
def _lowerCamelCase( a ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
__a = dict((re.sub(R"@@$" , "" , a ), v) if k.endswith("@@" ) else (re.sub(R"$" , "</w>" , a ), v) for k, v in d.items() )
__a = "<s> <pad> </s> <unk>".split()
# restore the special tokens
for k in keep_keys:
del da[F"{k}</w>"]
__a = d[k] # restore
return da
def _lowerCamelCase( a , a ):
# prep
if not os.path.exists(a ):
raise ValueError(F"path {biogpt_checkpoint_path} does not exist!" )
os.makedirs(a , exist_ok=a )
print(F"Writing results to {pytorch_dump_folder_path}" )
# handle various types of models
__a = os.path.join(a , "checkpoint.pt" )
if not os.path.isfile(a ):
raise ValueError(F"path to the file {checkpoint_file} does not exist!" )
__a = torch.load(a , map_location="cpu" )
__a = chkpt["cfg"]["model"]
# dicts
__a = os.path.join(a , "dict.txt" )
if not os.path.isfile(a ):
raise ValueError(F"path to the file {dict_file} does not exist!" )
__a = Dictionary.load(a )
__a = rewrite_dict_keys(src_dict.indices )
__a = len(a )
__a = os.path.join(a , VOCAB_FILES_NAMES["vocab_file"] )
print(F"Generating {src_vocab_file} of {src_vocab_size} records" )
with open(a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(a , ensure_ascii=a , indent=a ) )
# merges_file (bpecodes)
__a = os.path.join(a , "bpecodes" )
if not os.path.isfile(a ):
raise ValueError(F"path to the file {bpecodes_file} does not exist!" )
__a = os.path.join(a , VOCAB_FILES_NAMES["merges_file"] )
shutil.copyfile(a , a )
# model config
__a = os.path.join(a , "config.json" )
__a = {
"activation_dropout": args["activation_dropout"],
"architectures": ["BioGptForCausalLM"],
"attention_probs_dropout_prob": args["attention_dropout"],
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": args["activation_fn"],
"hidden_dropout_prob": args["dropout"],
"hidden_size": args["decoder_embed_dim"],
"initializer_range": 0.02,
"intermediate_size": args["decoder_ffn_embed_dim"],
"layer_norm_eps": 1E-1_2,
"layerdrop": args["decoder_layerdrop"],
"max_position_embeddings": args["max_target_positions"],
"model_type": "biogpt",
"num_attention_heads": args["decoder_attention_heads"],
"num_hidden_layers": args["decoder_layers"],
"pad_token_id": 1,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_decoder_input_output_embed"],
"vocab_size": src_vocab_size,
}
# good hparam defaults to start with
print(F"Generating {biogpt_model_config_file}" )
with open(a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(a , ensure_ascii=a , indent=a ) )
# tokenizer config
__a = os.path.join(a , a )
__a = {
"bos_token": "<s>",
"eos_token": "</s>",
"model_max_length": 1_0_2_4,
"pad_token": "<pad>",
"special_tokens_map_file": None,
"tokenizer_class": "BioGptTokenizer",
"unk_token": "<unk>",
}
print(F"Generating {biogpt_tokenizer_config_file}" )
with open(a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(a , ensure_ascii=a , indent=a ) )
# model
__a = chkpt["model"]
# remove unneeded keys
__a = [
"decoder.version",
]
for k in ignore_keys:
model_state_dict.pop(a , a )
__a = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("output_projection.weight" ):
__a = model_state_dict.pop(a )
else:
__a = model_state_dict.pop(a )
__a = BioGptConfig.from_pretrained(a )
__a = BioGptForCausalLM(a )
# check that it loads ok
model_new.load_state_dict(a )
# save
__a = os.path.join(a , a )
print(F"Generating {pytorch_weights_dump_path}" )
torch.save(a , a )
print("Conversion is done!" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__:Any = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 528 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCamelCase : Tuple =logging.get_logger(__name__)
_UpperCamelCase : Any ={
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 'dinat'
SCREAMING_SNAKE_CASE_ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , _snake_case=4 , _snake_case=3 , _snake_case=64 , _snake_case=[3, 4, 6, 5] , _snake_case=[2, 4, 8, 16] , _snake_case=7 , _snake_case=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , _snake_case=3.0 , _snake_case=True , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.1 , _snake_case="gelu" , _snake_case=0.0_2 , _snake_case=1E-5 , _snake_case=0.0 , _snake_case=None , _snake_case=None , **_snake_case , ):
"""simple docstring"""
super().__init__(**_snake_case )
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = embed_dim
__lowerCamelCase = depths
__lowerCamelCase = len(_snake_case )
__lowerCamelCase = num_heads
__lowerCamelCase = kernel_size
__lowerCamelCase = dilations
__lowerCamelCase = mlp_ratio
__lowerCamelCase = qkv_bias
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowerCamelCase = int(embed_dim * 2 ** (len(_snake_case ) - 1) )
__lowerCamelCase = layer_scale_init_value
__lowerCamelCase = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(_snake_case ) + 1 )]
__lowerCamelCase , __lowerCamelCase = get_aligned_output_features_output_indices(
out_features=_snake_case , out_indices=_snake_case , stage_names=self.stage_names )
| 575 |
'''simple docstring'''
def lowerCamelCase_ ( A_ ):
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
__lowerCamelCase = len(A_ ) if (len(A_ ) > 7) else 7
# Print table header for output
print(
'''Symbol'''.center(8 ) , '''Stack'''.center(A_ ) , '''Postfix'''.center(A_ ) , sep=''' | ''' , )
print('''-''' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(A_ ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(A_ ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(A_ ) == 0:
stack.append(A_ ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(A_ ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(A_ ) # push x to stack
print(
x.center(8 ) , (''''''.join(A_ )).ljust(A_ ) , (''''''.join(A_ )).ljust(A_ ) , sep=''' | ''' , ) # Output in tabular format
while len(A_ ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
''' '''.center(8 ) , (''''''.join(A_ )).ljust(A_ ) , (''''''.join(A_ )).ljust(A_ ) , sep=''' | ''' , ) # Output in tabular format
return "".join(A_ ) # return Postfix as str
def lowerCamelCase_ ( A_ ):
__lowerCamelCase = list(infix[::-1] ) # reverse the infix equation
for i in range(len(A_ ) ):
if infix[i] == "(":
__lowerCamelCase = ''')''' # change "(" to ")"
elif infix[i] == ")":
__lowerCamelCase = '''(''' # change ")" to "("
return (infix_2_postfix(''''''.join(A_ ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
_UpperCamelCase : Optional[Any] =input("\nEnter an Infix Equation = ") # Input an Infix equation
_UpperCamelCase : str ="".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 575 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _A ( UpperCAmelCase_ ):
lowercase_ : str = '''yolos'''
def __init__( self : List[Any] , lowerCamelCase__ : Union[str, Any]=7_68 , lowerCamelCase__ : Union[str, Any]=12 , lowerCamelCase__ : Tuple=12 , lowerCamelCase__ : Any=30_72 , lowerCamelCase__ : Union[str, Any]="gelu" , lowerCamelCase__ : str=0.0 , lowerCamelCase__ : Any=0.0 , lowerCamelCase__ : List[Any]=0.02 , lowerCamelCase__ : str=1e-12 , lowerCamelCase__ : str=[5_12, 8_64] , lowerCamelCase__ : Dict=16 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : int=1_00 , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : str=False , lowerCamelCase__ : Dict=1 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Optional[int]=2 , lowerCamelCase__ : Dict=5 , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : int=0.1 , **lowerCamelCase__ : List[Any] , ):
"""simple docstring"""
super().__init__(**lowerCamelCase__ )
__UpperCamelCase : Optional[int] = hidden_size
__UpperCamelCase : Dict = num_hidden_layers
__UpperCamelCase : Optional[Any] = num_attention_heads
__UpperCamelCase : Optional[Any] = intermediate_size
__UpperCamelCase : Dict = hidden_act
__UpperCamelCase : str = hidden_dropout_prob
__UpperCamelCase : Tuple = attention_probs_dropout_prob
__UpperCamelCase : Any = initializer_range
__UpperCamelCase : List[Any] = layer_norm_eps
__UpperCamelCase : int = image_size
__UpperCamelCase : Optional[Any] = patch_size
__UpperCamelCase : List[str] = num_channels
__UpperCamelCase : int = qkv_bias
__UpperCamelCase : Tuple = num_detection_tokens
__UpperCamelCase : List[str] = use_mid_position_embeddings
__UpperCamelCase : List[str] = auxiliary_loss
# Hungarian matcher
__UpperCamelCase : Dict = class_cost
__UpperCamelCase : Any = bbox_cost
__UpperCamelCase : Tuple = giou_cost
# Loss coefficients
__UpperCamelCase : Dict = bbox_loss_coefficient
__UpperCamelCase : Union[str, Any] = giou_loss_coefficient
__UpperCamelCase : str = eos_coefficient
class _A ( UpperCAmelCase_ ):
lowercase_ : List[str] = version.parse('''1.11''' )
@property
def a ( self : str ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def a ( self : Tuple ):
"""simple docstring"""
return 1e-4
@property
def a ( self : List[Any] ):
"""simple docstring"""
return 12
| 269 |
from __future__ import annotations
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ) -> Optional[Any]: # noqa: E741
while r - l > 1:
__UpperCamelCase : int = (l + r) // 2
if v[m] >= key:
__UpperCamelCase : Dict = m
else:
__UpperCamelCase : Optional[Any] = m # noqa: E741
return r
def __lowerCamelCase ( __lowerCAmelCase : list[int] ) -> int:
if len(__lowerCAmelCase ) == 0:
return 0
__UpperCamelCase : Optional[Any] = [0] * len(__lowerCAmelCase )
__UpperCamelCase : Optional[int] = 1
__UpperCamelCase : Union[str, Any] = v[0]
for i in range(1 , len(__lowerCAmelCase ) ):
if v[i] < tail[0]:
__UpperCamelCase : Dict = v[i]
elif v[i] > tail[length - 1]:
__UpperCamelCase : Optional[int] = v[i]
length += 1
else:
__UpperCamelCase : List[Any] = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 269 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 598 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class __snake_case ( _lowercase):
snake_case__ : Optional[int] = "canine"
def __init__( self : List[Any] , __lowerCAmelCase : str=7_6_8 , __lowerCAmelCase : List[Any]=1_2 , __lowerCAmelCase : Optional[int]=1_2 , __lowerCAmelCase : str=3_0_7_2 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Any=1_6_3_8_4 , __lowerCAmelCase : str=1_6 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Dict=1E-12 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Dict=0xe0_00 , __lowerCAmelCase : Optional[int]=0xe0_01 , __lowerCAmelCase : List[Any]=4 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Optional[int]=8 , __lowerCAmelCase : List[Any]=1_6_3_8_4 , __lowerCAmelCase : Optional[Any]=1_2_8 , **__lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_lowerCamelCase : Dict = max_position_embeddings
_lowerCamelCase : int = hidden_size
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : Dict = attention_probs_dropout_prob
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : Tuple = type_vocab_size
_lowerCamelCase : int = layer_norm_eps
# Character config:
_lowerCamelCase : Dict = downsampling_rate
_lowerCamelCase : str = upsampling_kernel_size
_lowerCamelCase : List[Any] = num_hash_functions
_lowerCamelCase : Dict = num_hash_buckets
_lowerCamelCase : Optional[Any] = local_transformer_stride
| 598 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Optional[Any] , _snake_case : Any , ) -> Optional[Any]:
'''simple docstring'''
a__ = parent
a__ = 13
a__ = 7
a__ = True
a__ = True
a__ = False
a__ = True
a__ = 99
a__ = 32
a__ = 2
a__ = 4
a__ = 37
a__ = 'gelu'
a__ = 0.1
a__ = 0.1
a__ = 512
a__ = 16
a__ = 2
a__ = 0.02
a__ = 3
a__ = 4
a__ = None
def _lowerCAmelCase ( self : int ) -> Dict:
'''simple docstring'''
a__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a__ = ids_tensor([self.batch_size] , self.num_choices )
a__ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : str , _snake_case : str , _snake_case : Any , _snake_case : Dict , _snake_case : Tuple , _snake_case : Union[str, Any] , _snake_case : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
a__ = TFDistilBertModel(config=_snake_case )
a__ = {'input_ids': input_ids, 'attention_mask': input_mask}
a__ = model(_snake_case )
a__ = [input_ids, input_mask]
a__ = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Any , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : str , _snake_case : List[Any] , _snake_case : Dict , _snake_case : List[str] ) -> List[str]:
'''simple docstring'''
a__ = TFDistilBertForMaskedLM(config=_snake_case )
a__ = {'input_ids': input_ids, 'attention_mask': input_mask}
a__ = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : Union[str, Any] , _snake_case : List[str] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : int , _snake_case : str , _snake_case : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
a__ = TFDistilBertForQuestionAnswering(config=_snake_case )
a__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
}
a__ = model(_snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self : Any , _snake_case : Tuple , _snake_case : Any , _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : Optional[int] ) -> Any:
'''simple docstring'''
a__ = self.num_labels
a__ = TFDistilBertForSequenceClassification(_snake_case )
a__ = {'input_ids': input_ids, 'attention_mask': input_mask}
a__ = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : Optional[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : List[str] , _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Optional[Any] ) -> Tuple:
'''simple docstring'''
a__ = self.num_choices
a__ = TFDistilBertForMultipleChoice(_snake_case )
a__ = tf.tile(tf.expand_dims(_snake_case , 1 ) , (1, self.num_choices, 1) )
a__ = tf.tile(tf.expand_dims(_snake_case , 1 ) , (1, self.num_choices, 1) )
a__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
}
a__ = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self : Tuple , _snake_case : Dict , _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : Optional[int] ) -> int:
'''simple docstring'''
a__ = self.num_labels
a__ = TFDistilBertForTokenClassification(_snake_case )
a__ = {'input_ids': input_ids, 'attention_mask': input_mask}
a__ = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
a__ = self.prepare_config_and_inputs()
((a__) , (a__) , (a__) , (a__) , (a__) , (a__)) = config_and_inputs
a__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( a , a , unittest.TestCase ):
"""simple docstring"""
a_ : int =(
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
a_ : Any =(
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ : Optional[Any] =False
a_ : List[str] =False
def _lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
a__ = TFDistilBertModelTester(self )
a__ = ConfigTester(self , config_class=_snake_case , dim=37 )
def _lowerCAmelCase ( self : int ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : Any ) -> Any:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_snake_case )
def _lowerCAmelCase ( self : List[Any] ) -> Any:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_snake_case )
def _lowerCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_snake_case )
def _lowerCAmelCase ( self : Any ) -> Optional[Any]:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_snake_case )
def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_snake_case )
@slow
def _lowerCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
a__ = TFDistilBertModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : List[Any] ) -> Any:
'''simple docstring'''
a__ = TFDistilBertModel.from_pretrained('distilbert-base-uncased' )
a__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
a__ = model(_snake_case )[0]
a__ = [1, 6, 768]
self.assertEqual(output.shape , _snake_case )
a__ = tf.constant(
[
[
[0.1926_1885, -0.1373_2955, 0.411_9799],
[0.2215_0156, -0.0742_2661, 0.3903_7204],
[0.2275_6018, -0.089_6414, 0.370_1467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _snake_case , atol=1E-4 )
| 232 | """simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SCREAMING_SNAKE_CASE ( a ):
"""simple docstring"""
def __init__( self : List[str] , _snake_case : str = "▁" , _snake_case : bool = True , _snake_case : Union[str, AddedToken] = "<unk>" , _snake_case : Union[str, AddedToken] = "</s>" , _snake_case : Union[str, AddedToken] = "<pad>" , ) -> Optional[int]:
'''simple docstring'''
a__ = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
a__ = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
a__ = token_dict['token']
a__ = Tokenizer(Unigram() )
a__ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
a__ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_snake_case , add_prefix_space=_snake_case ),
pre_tokenizers.Digits(individual_digits=_snake_case ),
pre_tokenizers.Punctuation(),
] )
a__ = decoders.Metaspace(replacement=_snake_case , add_prefix_space=_snake_case )
a__ = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
a__ = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(_snake_case , _snake_case )
def _lowerCAmelCase ( self : Dict , _snake_case : Union[str, List[str]] , _snake_case : int = 8000 , _snake_case : bool = True , ) -> str:
'''simple docstring'''
a__ = trainers.UnigramTrainer(
vocab_size=_snake_case , special_tokens=self.special_tokens_list , show_progress=_snake_case , )
if isinstance(_snake_case , _snake_case ):
a__ = [files]
self._tokenizer.train(_snake_case , trainer=_snake_case )
self.add_unk_id()
def _lowerCAmelCase ( self : Union[str, Any] , _snake_case : Union[Iterator[str], Iterator[Iterator[str]]] , _snake_case : int = 8000 , _snake_case : bool = True , ) -> Optional[Any]:
'''simple docstring'''
a__ = trainers.UnigramTrainer(
vocab_size=_snake_case , special_tokens=self.special_tokens_list , show_progress=_snake_case , )
self._tokenizer.train_from_iterator(_snake_case , trainer=_snake_case )
self.add_unk_id()
def _lowerCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
a__ = json.loads(self._tokenizer.to_str() )
a__ = self.special_tokens['unk']['id']
a__ = Tokenizer.from_str(json.dumps(_snake_case ) )
| 232 | 1 |
import colorsys
from PIL import Image # type: ignore
def snake_case ( snake_case__ :float , snake_case__ :float , snake_case__ :int) -> float:
_A = x
_A = y
for step in range(snake_case__): # noqa: B007
_A = a * a - b * b + x
_A = 2 * a * b + y
_A = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def snake_case ( snake_case__ :float) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def snake_case ( snake_case__ :float) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(snake_case__ , 1 , 1))
def snake_case ( snake_case__ :int = 800 , snake_case__ :int = 600 , snake_case__ :float = -0.6 , snake_case__ :float = 0 , snake_case__ :float = 3.2 , snake_case__ :int = 50 , snake_case__ :bool = True , ) -> Image.Image:
_A = Image.new("""RGB""" , (image_width, image_height))
_A = img.load()
# loop through the image-coordinates
for image_x in range(snake_case__):
for image_y in range(snake_case__):
# determine the figure-coordinates based on the image-coordinates
_A = figure_width / image_width * image_height
_A = figure_center_x + (image_x / image_width - 0.5) * figure_width
_A = figure_center_y + (image_y / image_height - 0.5) * figure_height
_A = get_distance(snake_case__ , snake_case__ , snake_case__)
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
_A = get_color_coded_rgb(snake_case__)
else:
_A = get_black_and_white_rgb(snake_case__)
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
_SCREAMING_SNAKE_CASE = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 702 | import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
_SCREAMING_SNAKE_CASE = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def snake_case ( snake_case__ :Union[str, Any]) -> Dict:
_A = torch.load(snake_case__ , map_location="""cpu""")
return sd
def snake_case ( snake_case__ :List[str] , snake_case__ :Optional[Any] , snake_case__ :int=rename_keys_prefix) -> Optional[Any]:
_A = OrderedDict()
_A = torch.arange(config.max_position_embeddings).expand((1, -1))
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_A = key
for name_pair in rename_keys_prefix:
_A = new_key.replace(name_pair[0] , name_pair[1])
_A = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
_A = new_d["""cls.predictions.bias"""]
return new_d
@torch.no_grad()
def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple) -> int:
assert (
checkpoint_path.split("""/""")[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
_A = """pretraining"""
if "vcr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 512}
elif "vqa_advanced" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
elif "vqa" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
elif "nlvr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 1_024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''')
else:
if "vcr" in checkpoint_path:
_A = {"""visual_embedding_dim""": 512}
_A = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048}
_A = """vqa_advanced"""
elif "vqa" in checkpoint_path:
_A = {"""visual_embedding_dim""": 2_048, """num_labels""": 3_129}
_A = """vqa"""
elif "nlvr" in checkpoint_path:
_A = {
"""visual_embedding_dim""": 1_024,
"""num_labels""": 2,
}
_A = """nlvr"""
_A = VisualBertConfig(**snake_case__)
# Load State Dict
_A = load_state_dict(snake_case__)
_A = get_new_dict(snake_case__ , snake_case__)
if model_type == "pretraining":
_A = VisualBertForPreTraining(snake_case__)
elif model_type == "vqa":
_A = VisualBertForQuestionAnswering(snake_case__)
elif model_type == "nlvr":
_A = VisualBertForVisualReasoning(snake_case__)
elif model_type == "multichoice":
_A = VisualBertForMultipleChoice(snake_case__)
model.load_state_dict(snake_case__)
# Save Checkpoints
Path(snake_case__).mkdir(exist_ok=snake_case__)
model.save_pretrained(snake_case__)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 83 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , __A : Dict , __A : Any=1_4 , __A : Union[str, Any]=7 , __A : Dict=True , __A : int=True , __A : int=False , __A : Any=True , __A : str=9_9 , __A : int=3_2 , __A : Optional[Any]=4 , __A : Optional[Any]=4 , __A : int=4 , __A : str=3_7 , __A : str="gelu" , __A : int=0.1 , __A : Tuple=0.1 , __A : Optional[int]=5_1_2 , __A : Optional[Any]=0.0_2 , ):
"""simple docstring"""
_lowercase = parent
_lowercase = batch_size
_lowercase = seq_length
_lowercase = is_training
_lowercase = use_input_mask
_lowercase = use_token_type_ids
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = rotary_dim
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = initializer_range
_lowercase = None
_lowercase = vocab_size - 1
_lowercase = vocab_size - 1
_lowercase = vocab_size - 1
def snake_case ( self : List[Any] ):
"""simple docstring"""
_lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase = None
if self.use_input_mask:
_lowercase = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def snake_case ( self : List[str] ):
"""simple docstring"""
_lowercase = self.prepare_config_and_inputs()
_lowercase = config_and_inputs
_lowercase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def snake_case ( self : str , __A : List[str] , __A : Dict , __A : List[str] , __A : List[str] ):
"""simple docstring"""
_lowercase = 2_0
_lowercase = model_class_name(UpperCamelCase__ )
_lowercase = model.init_cache(input_ids.shape[0] , UpperCamelCase__ )
_lowercase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" )
_lowercase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
_lowercase = model(
input_ids[:, :-1] , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , position_ids=UpperCamelCase__ , )
_lowercase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
_lowercase = model(
input_ids[:, -1:] , attention_mask=UpperCamelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCamelCase__ , )
_lowercase = model(UpperCamelCase__ )
_lowercase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def snake_case ( self : Optional[int] , __A : Any , __A : str , __A : Tuple , __A : Optional[int] ):
"""simple docstring"""
_lowercase = 2_0
_lowercase = model_class_name(UpperCamelCase__ )
_lowercase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
_lowercase = model.init_cache(input_ids.shape[0] , UpperCamelCase__ )
_lowercase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
_lowercase = model(
input_ids[:, :-1] , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , position_ids=UpperCamelCase__ , )
_lowercase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
_lowercase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCamelCase__ , position_ids=UpperCamelCase__ , )
_lowercase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
_lowercase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
UpperCAmelCase__ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def snake_case ( self : Tuple ):
"""simple docstring"""
_lowercase = FlaxGPTJModelTester(self )
def snake_case ( self : List[str] ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
@tooslow
def snake_case ( self : Dict ):
"""simple docstring"""
_lowercase = GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" )
_lowercase = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=UpperCamelCase__ , truncation=UpperCamelCase__ )
_lowercase = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" )
_lowercase = False
_lowercase = model.config.eos_token_id
_lowercase = jax.jit(model.generate )
_lowercase = jit_generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences
_lowercase = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
_lowercase = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@is_pt_flax_cross_test
def snake_case ( self : str ):
"""simple docstring"""
_lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
_lowercase = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
_lowercase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
_lowercase = model_class.__name__[4:] # Skip the "Flax" at the beginning
_lowercase = getattr(UpperCamelCase__ , UpperCamelCase__ )
_lowercase = pt_inputs['''input_ids'''].shape
_lowercase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCamelCase__ ):
_lowercase = 0
_lowercase = 1
_lowercase = 0
_lowercase = 1
_lowercase = pt_model_class(UpperCamelCase__ ).eval()
_lowercase = model_class(UpperCamelCase__ , dtype=jnp.floataa )
_lowercase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCamelCase__ )
_lowercase = fx_state
with torch.no_grad():
_lowercase = pt_model(**UpperCamelCase__ ).to_tuple()
_lowercase = fx_model(**UpperCamelCase__ ).to_tuple()
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCamelCase__ )
_lowercase = model_class.from_pretrained(UpperCamelCase__ , from_pt=UpperCamelCase__ )
_lowercase = fx_model_loaded(**UpperCamelCase__ ).to_tuple()
self.assertEqual(
len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def snake_case ( self : str ):
"""simple docstring"""
_lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
_lowercase = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
_lowercase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
_lowercase = model_class.__name__[4:] # Skip the "Flax" at the beginning
_lowercase = getattr(UpperCamelCase__ , UpperCamelCase__ )
_lowercase = pt_model_class(UpperCamelCase__ ).eval()
_lowercase = model_class(UpperCamelCase__ , dtype=jnp.floataa )
_lowercase = load_flax_weights_in_pytorch_model(UpperCamelCase__ , fx_model.params )
_lowercase = pt_inputs['''input_ids'''].shape
_lowercase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCamelCase__ ):
_lowercase = 0
_lowercase = 1
_lowercase = 0
_lowercase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
_lowercase = pt_model(**UpperCamelCase__ ).to_tuple()
_lowercase = fx_model(**UpperCamelCase__ ).to_tuple()
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCamelCase__ )
_lowercase = pt_model_class.from_pretrained(UpperCamelCase__ , from_flax=UpperCamelCase__ )
with torch.no_grad():
_lowercase = pt_model_loaded(**UpperCamelCase__ ).to_tuple()
self.assertEqual(
len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def snake_case ( self : Tuple ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
_lowercase = model_class_name.from_pretrained("EleutherAI/gpt-j-6B" )
_lowercase = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase__ )
| 497 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase__ = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["PoolFormerFeatureExtractor"]
lowercase__ = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 638 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a__:
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=[10, 20, 30, 40] , _UpperCAmelCase=[2, 2, 3, 2] , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=["stage2", "stage3", "stage4"] , _UpperCAmelCase=[2, 3, 4] , _UpperCAmelCase=None , ) -> Dict:
snake_case__ =parent
snake_case__ =batch_size
snake_case__ =image_size
snake_case__ =num_channels
snake_case__ =num_stages
snake_case__ =hidden_sizes
snake_case__ =depths
snake_case__ =is_training
snake_case__ =use_labels
snake_case__ =intermediate_size
snake_case__ =hidden_act
snake_case__ =num_labels
snake_case__ =initializer_range
snake_case__ =out_features
snake_case__ =out_indices
snake_case__ =scope
def _lowercase ( self ) -> int:
snake_case__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ =None
if self.use_labels:
snake_case__ =ids_tensor([self.batch_size] , self.num_labels )
snake_case__ =self.get_config()
return config, pixel_values, labels
def _lowercase ( self ) -> List[str]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
snake_case__ =ConvNextVaModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case__ =model(_UpperCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
snake_case__ =ConvNextVaForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case__ =model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
snake_case__ =ConvNextVaBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case__ =model(_UpperCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case__ =None
snake_case__ =ConvNextVaBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case__ =model(_UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowercase ( self ) -> Optional[int]:
snake_case__ =self.prepare_config_and_inputs()
snake_case__ =config_and_inputs
snake_case__ ={"""pixel_values""": pixel_values}
return config, inputs_dict
def _lowercase ( self ) -> Tuple:
snake_case__ =self.prepare_config_and_inputs()
snake_case__ =config_and_inputs
snake_case__ ={"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class a__( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
a_ : Optional[int] = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
a_ : Tuple = (
{'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
a_ : Tuple = False
a_ : Tuple = False
a_ : List[Any] = False
a_ : int = False
a_ : List[Any] = False
def _lowercase ( self ) -> Union[str, Any]:
snake_case__ =ConvNextVaModelTester(self )
snake_case__ =ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def _lowercase ( self ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self ) -> Any:
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def _lowercase ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def _lowercase ( self ) -> Dict:
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def _lowercase ( self ) -> Union[str, Any]:
pass
def _lowercase ( self ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
snake_case__ =self.model_tester.prepare_config_and_inputs_with_labels()
snake_case__ =True
if model_class.__name__ in [
*get_values(_UpperCamelCase ),
*get_values(_UpperCamelCase ),
]:
continue
snake_case__ =model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.train()
snake_case__ =self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
snake_case__ =model(**_UpperCamelCase ).loss
loss.backward()
def _lowercase ( self ) -> Optional[int]:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
snake_case__ =self.model_tester.prepare_config_and_inputs_with_labels()
snake_case__ =False
snake_case__ =True
if (
model_class.__name__
in [*get_values(_UpperCamelCase ), *get_values(_UpperCamelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
snake_case__ =model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
snake_case__ =self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
snake_case__ =model(**_UpperCamelCase ).loss
loss.backward()
def _lowercase ( self ) -> Union[str, Any]:
snake_case__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ =model_class(_UpperCamelCase )
snake_case__ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ =[*signature.parameters.keys()]
snake_case__ =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def _lowercase ( self ) -> str:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def _lowercase ( self ) -> Dict:
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case__ =model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case__ =model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
snake_case__ =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ =self.model_tester.num_stages
self.assertEqual(len(_UpperCamelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ =True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ =True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def _lowercase ( self ) -> Tuple:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
@slow
def _lowercase ( self ) -> Union[str, Any]:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ =ConvNextVaModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def a ( ) -> Union[str, Any]:
snake_case__ =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a__( unittest.TestCase ):
@cached_property
def _lowercase ( self ) -> Tuple:
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def _lowercase ( self ) -> int:
snake_case__ =ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(_UpperCamelCase )
snake_case__ =self.default_image_processor
snake_case__ =prepare_img()
snake_case__ =preprocessor(images=_UpperCamelCase , return_tensors='pt' ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case__ =model(**_UpperCamelCase )
# verify the logits
snake_case__ =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
snake_case__ =torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4 ) )
| 715 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict ) -> Any:
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def a ( UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any] ) -> Union[str, Any]:
snake_case__ =tmp_path / 'cache'
snake_case__ ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case__ =JsonDatasetReader(UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ ).read()
_check_json_dataset(UpperCamelCase_ , UpperCamelCase_ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def a ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] ) -> Optional[int]:
snake_case__ =tmp_path / 'cache'
snake_case__ ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
snake_case__ =features.copy() if features else default_expected_features
snake_case__ =(
Features({feature: Value(UpperCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ =JsonDatasetReader(UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
_check_json_dataset(UpperCamelCase_ , UpperCamelCase_ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] ) -> List[str]:
snake_case__ =tmp_path / 'cache'
snake_case__ ={'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
snake_case__ =features.copy() if features else default_expected_features
snake_case__ =(
Features({feature: Value(UpperCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ =JsonDatasetReader(UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] ) -> str:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
snake_case__ ={'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
snake_case__ =features.copy()
snake_case__ =(
Features({feature: Value(UpperCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ =tmp_path / 'cache'
snake_case__ =JsonDatasetReader(UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def a ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] ) -> Any:
snake_case__ =tmp_path / 'cache'
snake_case__ ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
snake_case__ =JsonDatasetReader(UpperCamelCase_ , cache_dir=UpperCamelCase_ , split=UpperCamelCase_ ).read()
_check_json_dataset(UpperCamelCase_ , UpperCamelCase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] ) -> str:
if issubclass(UpperCamelCase_ , UpperCamelCase_ ):
snake_case__ =jsonl_path
elif issubclass(UpperCamelCase_ , UpperCamelCase_ ):
snake_case__ =[jsonl_path]
snake_case__ =tmp_path / 'cache'
snake_case__ ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
snake_case__ =JsonDatasetReader(UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
_check_json_dataset(UpperCamelCase_ , UpperCamelCase_ )
def a ( UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : str=("train",) ) -> str:
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
for split in splits:
snake_case__ =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def a ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : str ) -> List[Any]:
snake_case__ =tmp_path / 'cache'
snake_case__ ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case__ =JsonDatasetReader({'train': jsonl_path} , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ ).read()
_check_json_datasetdict(UpperCamelCase_ , UpperCamelCase_ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def a ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] ) -> Optional[int]:
snake_case__ =tmp_path / 'cache'
snake_case__ ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
snake_case__ =features.copy() if features else default_expected_features
snake_case__ =(
Features({feature: Value(UpperCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ =JsonDatasetReader({'train': jsonl_path} , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
_check_json_datasetdict(UpperCamelCase_ , UpperCamelCase_ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any ) -> Any:
if split:
snake_case__ ={split: jsonl_path}
else:
snake_case__ ='train'
snake_case__ ={'train': jsonl_path, 'test': jsonl_path}
snake_case__ =tmp_path / 'cache'
snake_case__ ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
snake_case__ =JsonDatasetReader(UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
_check_json_datasetdict(UpperCamelCase_ , UpperCamelCase_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def a ( UpperCamelCase_ : Union[str, Any] ) -> int:
return json.load(UpperCamelCase_ )
def a ( UpperCamelCase_ : str ) -> Union[str, Any]:
return [json.loads(UpperCamelCase_ ) for line in buffer]
class a__:
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str:
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase ).write()
buffer.seek(0 )
snake_case__ =load_json_function(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
assert isinstance(exported_content[0] , _UpperCAmelCase )
assert len(_UpperCAmelCase ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase , orient=_UpperCAmelCase ).write()
buffer.seek(0 )
snake_case__ =load_json(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_UpperCAmelCase , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_UpperCAmelCase ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase , num_proc=2 ).write()
buffer.seek(0 )
snake_case__ =load_json_function(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
assert isinstance(exported_content[0] , _UpperCAmelCase )
assert len(_UpperCAmelCase ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase , orient=_UpperCAmelCase , num_proc=2 ).write()
buffer.seek(0 )
snake_case__ =load_json(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_UpperCAmelCase , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_UpperCAmelCase ) == 10
def _lowercase ( self , _UpperCAmelCase ) -> List[str]:
with pytest.raises(_UpperCAmelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
snake_case__ =tmp_path_factory.mktemp('data' ) / f"""test.json.{extension}"""
snake_case__ =str(shared_datadir / f"""test_file.json.{extension}""" )
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , compression=_UpperCAmelCase ).write()
with fsspec.open(_UpperCAmelCase , 'rb' , compression='infer' ) as f:
snake_case__ =f.read()
with fsspec.open(_UpperCAmelCase , 'rb' , compression='infer' ) as f:
snake_case__ =f.read()
assert exported_content == original_content
| 581 | 0 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( snake_case , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : str = MgpstrTokenizer
__lowerCamelCase : Tuple = False
__lowerCamelCase : int = {}
__lowerCamelCase : Optional[int] = False
def snake_case_ ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
snake_case : Optional[int] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
snake_case : Union[str, Any] = dict(zip(SCREAMING_SNAKE_CASE_ ,range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
snake_case : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + """\n""" )
def snake_case_ ( self ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : str = """tester"""
snake_case : Tuple = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case : Union[str, Any] = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
snake_case : str = tokenizer.encode([special_token] ,add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,1 )
snake_case : Dict = tokenizer.decode(SCREAMING_SNAKE_CASE_ ,skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertTrue(special_token not in decoded )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case , snake_case : List[str] = self.get_input_output_texts(SCREAMING_SNAKE_CASE_ )
snake_case : Any = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
snake_case : str = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : Dict = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(len(SCREAMING_SNAKE_CASE_ ) ,0 )
snake_case : Any = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
self.assertEqual(text_a.replace(""" """ ,"""""" ) ,SCREAMING_SNAKE_CASE_ )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
| 36 |
import numpy as np
def lowercase ( __A : np.array ) -> np.array:
'''simple docstring'''
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 | 1 |
'''simple docstring'''
class __a :
def __init__( self : Dict ) -> str:
"""simple docstring"""
UpperCAmelCase_ : str = ''''''
UpperCAmelCase_ : List[Any] = ''''''
UpperCAmelCase_ : Optional[int] = []
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : int , __magic_name__ : int ) -> int:
"""simple docstring"""
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
UpperCAmelCase_ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
UpperCAmelCase_ : List[str] = self.__min_dist_top_down_dp(__UpperCamelCase , n - 1 )
UpperCAmelCase_ : Dict = self.__min_dist_top_down_dp(m - 1 , __UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
UpperCAmelCase_ : int = 1 + min(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self.dp[m][n]
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Dict = worda
UpperCAmelCase_ : Tuple = worda
UpperCAmelCase_ : List[Any] = [[-1 for _ in range(len(__UpperCamelCase ) )] for _ in range(len(__UpperCamelCase ) )]
return self.__min_dist_top_down_dp(len(__UpperCamelCase ) - 1 , len(__UpperCamelCase ) - 1 )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : str , __magic_name__ : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ : str = worda
UpperCAmelCase_ : Optional[Any] = worda
UpperCAmelCase_ : Dict = len(__UpperCamelCase )
UpperCAmelCase_ : Tuple = len(__UpperCamelCase )
UpperCAmelCase_ : Optional[int] = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
UpperCAmelCase_ : Dict = j
elif j == 0: # second string is empty
UpperCAmelCase_ : Dict = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
UpperCAmelCase_ : int = self.dp[i - 1][j - 1]
else:
UpperCAmelCase_ : Optional[Any] = self.dp[i][j - 1]
UpperCAmelCase_ : int = self.dp[i - 1][j]
UpperCAmelCase_ : List[Any] = self.dp[i - 1][j - 1]
UpperCAmelCase_ : Any = 1 + min(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self.dp[m][n]
if __name__ == "__main__":
lowerCamelCase : List[Any] = EditDistance()
print("****************** Testing Edit Distance DP Algorithm ******************")
print()
lowerCamelCase : Dict = input("Enter the first string: ").strip()
lowerCamelCase : str = input("Enter the second string: ").strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 707 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __a (lowerCamelCase , unittest.TestCase ):
__a : List[str] = BlenderbotSmallTokenizer
__a : List[Any] = False
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
UpperCAmelCase_ : Tuple = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
UpperCAmelCase_ : Optional[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
UpperCAmelCase_ : int = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
UpperCAmelCase_ : Optional[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__magic_name__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__magic_name__ ) )
def UpperCAmelCase__ ( self : List[Any] , **__magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = '''adapt act apte'''
UpperCAmelCase_ : Tuple = '''adapt act apte'''
return input_text, output_text
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : str = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase_ : List[Any] = '''adapt act apte'''
UpperCAmelCase_ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te''']
UpperCAmelCase_ : Dict = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
UpperCAmelCase_ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def UpperCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [13_84]
UpperCAmelCase_ : Optional[int] = '''I am a small frog.'''
UpperCAmelCase_ : List[str] = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )['''input_ids''']
UpperCAmelCase_ : Dict = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
UpperCAmelCase_ : List[Any] = '''I am a small frog .'''
UpperCAmelCase_ : Any = '''.'''
UpperCAmelCase_ : List[Any] = tok(__magic_name__ )['''input_ids''']
UpperCAmelCase_ : Optional[int] = tok(__magic_name__ )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 644 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase ( UpperCamelCase_ ):
__a = (DPMSolverSinglestepScheduler,)
__a = (("num_inference_steps", 25),)
def UpperCamelCase_ ( self , **lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__: Any= {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf''' ),
'''variance_type''': None,
}
config.update(**lowerCAmelCase )
return config
def UpperCamelCase_ ( self , lowerCAmelCase=0 , **lowerCAmelCase ) -> Any:
SCREAMING_SNAKE_CASE__: Union[str, Any]= dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE__: Tuple= kwargs.pop('''num_inference_steps''' , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= self.dummy_sample
SCREAMING_SNAKE_CASE__: List[Any]= 0.1 * sample
SCREAMING_SNAKE_CASE__: Optional[Any]= [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE__: str= self.get_scheduler_config(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE__: Tuple= dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= scheduler_class.from_pretrained(lowerCAmelCase )
new_scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE__: int= dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Dict= sample, sample
for t in range(lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ):
SCREAMING_SNAKE_CASE__: str= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
SCREAMING_SNAKE_CASE__: str= new_scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self ) -> Optional[Any]:
pass
def UpperCamelCase_ ( self , lowerCAmelCase=0 , **lowerCAmelCase ) -> Dict:
SCREAMING_SNAKE_CASE__: Optional[int]= dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE__: List[Any]= kwargs.pop('''num_inference_steps''' , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= self.dummy_sample
SCREAMING_SNAKE_CASE__: Dict= 0.1 * sample
SCREAMING_SNAKE_CASE__: Optional[Any]= [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_scheduler_config()
SCREAMING_SNAKE_CASE__: Tuple= scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE__: Tuple= dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= scheduler_class.from_pretrained(lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE__: Union[str, Any]= dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE__: Dict= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
SCREAMING_SNAKE_CASE__: str= new_scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self , lowerCAmelCase=None , **lowerCAmelCase ) -> List[Any]:
if scheduler is None:
SCREAMING_SNAKE_CASE__: Dict= self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__: Dict= self.get_scheduler_config(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= scheduler_class(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__: Tuple= self.get_scheduler_config(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= scheduler_class(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= 10
SCREAMING_SNAKE_CASE__: Optional[Any]= self.dummy_model()
SCREAMING_SNAKE_CASE__: List[Any]= self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__: str= model(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
return sample
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: Union[str, Any]= DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE__: Tuple= 50
SCREAMING_SNAKE_CASE__: List[Any]= self.dummy_model()
SCREAMING_SNAKE_CASE__: List[str]= self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
SCREAMING_SNAKE_CASE__: Dict= model(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
SCREAMING_SNAKE_CASE__: Union[str, Any]= torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1e-3
def UpperCamelCase_ ( self ) -> str:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase )
def UpperCamelCase_ ( self ) -> int:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
SCREAMING_SNAKE_CASE__: Optional[int]= DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE__: int= self.full_loop(scheduler=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
SCREAMING_SNAKE_CASE__: Tuple= DEISMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE__: str= DPMSolverMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE__: Dict= UniPCMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE__: Union[str, Any]= DPMSolverSinglestepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE__: List[Any]= self.full_loop(scheduler=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
def UpperCamelCase_ ( self ) -> int:
self.check_over_configs(thresholding=lowerCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , algorithm_type='''dpmsolver++''' , solver_order=lowerCAmelCase , solver_type=lowerCAmelCase , )
def UpperCamelCase_ ( self ) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase )
def UpperCamelCase_ ( self ) -> int:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCAmelCase , solver_type=lowerCAmelCase , prediction_type=lowerCAmelCase , algorithm_type=lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: int= self.full_loop(
solver_order=lowerCAmelCase , solver_type=lowerCAmelCase , prediction_type=lowerCAmelCase , algorithm_type=lowerCAmelCase , )
assert not torch.isnan(lowerCAmelCase ).any(), "Samples have nan numbers"
def UpperCamelCase_ ( self ) -> int:
self.check_over_configs(lower_order_final=lowerCAmelCase )
self.check_over_configs(lower_order_final=lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Dict:
self.check_over_configs(lambda_min_clipped=-float('''inf''' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
self.check_over_configs(variance_type=lowerCAmelCase )
self.check_over_configs(variance_type='''learned_range''' )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=lowerCAmelCase , time_step=0 )
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: List[str]= self.full_loop()
SCREAMING_SNAKE_CASE__: List[Any]= torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Optional[int]= self.full_loop(use_karras_sigmas=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1e-3
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: Any= self.full_loop(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE__: Union[str, Any]= torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1e-3
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: str= self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1e-3
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: int= self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__: int= self.get_scheduler_config(thresholding=lowerCAmelCase , dynamic_thresholding_ratio=0 )
SCREAMING_SNAKE_CASE__: Optional[int]= scheduler_class(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= 10
SCREAMING_SNAKE_CASE__: Tuple= self.dummy_model()
SCREAMING_SNAKE_CASE__: Any= self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__: Union[str, Any]= model(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Union[str, Any]= scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
| 64 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowercase_ : List[Any] = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[Any] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Any = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowercase_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 1 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__UpperCAmelCase : Optional[Any] = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
__UpperCAmelCase : Optional[int] = json.load(f)
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : List[str] , A : Any ):
return FSMTTokenizer.from_pretrained(A )
def UpperCAmelCase__ ( self : Optional[int] , A : int ):
__snake_case: str = FSMTForConditionalGeneration.from_pretrained(A ).to(A )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
def UpperCAmelCase__ ( self : Dict , A : Any , A : List[str] ):
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
__snake_case: List[str] = f'''facebook/wmt19-{pair}'''
__snake_case: List[str] = self.get_tokenizer(A )
__snake_case: str = self.get_model(A )
__snake_case: str = bleu_data[pair]["""src"""]
__snake_case: Optional[int] = bleu_data[pair]["""tgt"""]
__snake_case: int = tokenizer(A , return_tensors="""pt""" , truncation=A , padding="""longest""" ).to(A )
__snake_case: Dict = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
__snake_case: int = tokenizer.batch_decode(
A , skip_special_tokens=A , clean_up_tokenization_spaces=A )
__snake_case: Tuple = calculate_bleu(A , A )
print(A )
self.assertGreaterEqual(scores["""bleu"""] , A )
| 705 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__UpperCAmelCase : List[Any] = logging.get_logger(__name__)
__UpperCAmelCase : Any = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = """layoutlmv3"""
def __init__( self : Any , A : List[Any]=50_265 , A : Any=768 , A : List[Any]=12 , A : Optional[Any]=12 , A : str=3_072 , A : List[str]="gelu" , A : Tuple=0.1 , A : Optional[Any]=0.1 , A : Optional[Any]=512 , A : str=2 , A : int=0.02 , A : Any=1E-5 , A : List[str]=1 , A : int=0 , A : str=2 , A : Optional[int]=1_024 , A : Optional[int]=128 , A : List[str]=128 , A : str=True , A : Union[str, Any]=32 , A : List[Any]=128 , A : Union[str, Any]=64 , A : List[str]=256 , A : Tuple=True , A : Optional[int]=True , A : Dict=True , A : Dict=224 , A : Dict=3 , A : List[Any]=16 , A : Tuple=None , **A : int , ):
super().__init__(
vocab_size=A , hidden_size=A , num_hidden_layers=A , num_attention_heads=A , intermediate_size=A , hidden_act=A , hidden_dropout_prob=A , attention_probs_dropout_prob=A , max_position_embeddings=A , type_vocab_size=A , initializer_range=A , layer_norm_eps=A , pad_token_id=A , bos_token_id=A , eos_token_id=A , **A , )
__snake_case: int = max_ad_position_embeddings
__snake_case: List[Any] = coordinate_size
__snake_case: List[Any] = shape_size
__snake_case: Any = has_relative_attention_bias
__snake_case: Any = rel_pos_bins
__snake_case: Optional[Any] = max_rel_pos
__snake_case: Union[str, Any] = has_spatial_attention_bias
__snake_case: str = rel_ad_pos_bins
__snake_case: Tuple = max_rel_ad_pos
__snake_case: Optional[int] = text_embed
__snake_case: List[Any] = visual_embed
__snake_case: int = input_size
__snake_case: List[Any] = num_channels
__snake_case: Union[str, Any] = patch_size
__snake_case: str = classifier_dropout
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = version.parse("""1.12""" )
@property
def UpperCAmelCase__ ( self : Optional[int] ):
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def UpperCAmelCase__ ( self : Dict ):
return 1E-5
@property
def UpperCAmelCase__ ( self : str ):
return 12
def UpperCAmelCase__ ( self : str , A : "ProcessorMixin" , A : int = -1 , A : int = -1 , A : bool = False , A : Optional["TensorType"] = None , A : int = 3 , A : int = 40 , A : int = 40 , ):
setattr(processor.image_processor , """apply_ocr""" , A )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__snake_case: Optional[int] = compute_effective_axis_dimension(
A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__snake_case: Tuple = processor.tokenizer.num_special_tokens_to_add(A )
__snake_case: Optional[int] = compute_effective_axis_dimension(
A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A )
# Generate dummy inputs according to compute batch and sequence
__snake_case: Optional[Any] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__snake_case: Optional[int] = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__snake_case: List[str] = self._generate_dummy_images(A , A , A , A )
__snake_case: int = dict(
processor(
A , text=A , boxes=A , return_tensors=A , ) )
return inputs
| 155 | 0 |
import logging
import os
from .state import PartialState
class _SCREAMING_SNAKE_CASE ( logging.LoggerAdapter ):
'''simple docstring'''
@staticmethod
def _snake_case ( __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _snake_case ( self : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , *__lowerCamelCase : Tuple , **__lowerCamelCase : Any ):
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
SCREAMING_SNAKE_CASE = kwargs.pop("main_process_only" , __lowerCamelCase )
SCREAMING_SNAKE_CASE = kwargs.pop("in_order" , __lowerCamelCase )
if self.isEnabledFor(__lowerCamelCase ):
if self._should_log(__lowerCamelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
elif in_order:
SCREAMING_SNAKE_CASE = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
state.wait_for_everyone()
def __a ( A__ : str , A__ : str = None ):
if log_level is None:
SCREAMING_SNAKE_CASE = os.environ.get("ACCELERATE_LOG_LEVEL" , A__ )
SCREAMING_SNAKE_CASE = logging.getLogger(A__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(A__ , {} ) | 16 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int=13 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : str=True , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[int]=224 , __lowerCamelCase : Any=1000 , __lowerCamelCase : Optional[Any]=[3, 3, 6, 4] , __lowerCamelCase : List[Any]=[48, 56, 112, 220] , ):
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = layer_depths
SCREAMING_SNAKE_CASE = embed_dims
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : Dict ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__lowerCamelCase , layer_scale_init_value=1e-5 , )
def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _snake_case ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : int ):
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCamelCase__ = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(
self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _snake_case ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def _snake_case ( self : Optional[int] ):
pass
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _snake_case ( self : Tuple ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = SwiftFormerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def _snake_case ( self : Union[str, Any] ):
pass
def _snake_case ( self : Optional[Any] ):
def check_hidden_states_output(__lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = 8
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__lowerCamelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : List[Any] ):
def _config_zero_init(__lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = copy.deepcopy(__lowerCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__lowerCamelCase , __lowerCamelCase , 1e-10 )
if isinstance(getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = _config_zero_init(getattr(__lowerCamelCase , __lowerCamelCase ) )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return configs_no_init
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : str ):
pass
def __a ( ):
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : List[str] ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) | 16 | 1 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
A = ['pixel_values']
def __init__( self ,__SCREAMING_SNAKE_CASE = True ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC ,__SCREAMING_SNAKE_CASE = True ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = True ,__SCREAMING_SNAKE_CASE = 1 / 255 ,__SCREAMING_SNAKE_CASE = True ,__SCREAMING_SNAKE_CASE = IMAGENET_DEFAULT_MEAN ,__SCREAMING_SNAKE_CASE = IMAGENET_DEFAULT_STD ,**__SCREAMING_SNAKE_CASE ,):
super().__init__(**__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Any = size if size is not None else {'shortest_edge': 224}
SCREAMING_SNAKE_CASE : int = get_size_dict(__SCREAMING_SNAKE_CASE ,default_to_square=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE : Optional[Any] = get_size_dict(__SCREAMING_SNAKE_CASE ,param_name='crop_size' )
SCREAMING_SNAKE_CASE : str = do_resize
SCREAMING_SNAKE_CASE : Tuple = size
SCREAMING_SNAKE_CASE : Tuple = resample
SCREAMING_SNAKE_CASE : Any = do_center_crop
SCREAMING_SNAKE_CASE : str = crop_size
SCREAMING_SNAKE_CASE : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE : List[str] = rescale_factor
SCREAMING_SNAKE_CASE : Dict = do_normalize
SCREAMING_SNAKE_CASE : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
SCREAMING_SNAKE_CASE : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC ,__SCREAMING_SNAKE_CASE = None ,**__SCREAMING_SNAKE_CASE ,):
SCREAMING_SNAKE_CASE : Any = get_size_dict(__SCREAMING_SNAKE_CASE ,default_to_square=__SCREAMING_SNAKE_CASE )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE : str = int((256 / 224) * size['shortest_edge'] )
SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(__SCREAMING_SNAKE_CASE ,size=__SCREAMING_SNAKE_CASE ,default_to_square=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[Any] = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
__SCREAMING_SNAKE_CASE ,size=(size_dict['height'], size_dict['width']) ,resample=__SCREAMING_SNAKE_CASE ,data_format=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ,**__SCREAMING_SNAKE_CASE ,):
SCREAMING_SNAKE_CASE : List[str] = get_size_dict(__SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(__SCREAMING_SNAKE_CASE ,size=(size['height'], size['width']) ,data_format=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ,**__SCREAMING_SNAKE_CASE ,):
return rescale(__SCREAMING_SNAKE_CASE ,scale=__SCREAMING_SNAKE_CASE ,data_format=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ,**__SCREAMING_SNAKE_CASE ,):
return normalize(__SCREAMING_SNAKE_CASE ,mean=__SCREAMING_SNAKE_CASE ,std=__SCREAMING_SNAKE_CASE ,data_format=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = ChannelDimension.FIRST ,**__SCREAMING_SNAKE_CASE ,):
SCREAMING_SNAKE_CASE : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : Any = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : str = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : str = size if size is not None else self.size
SCREAMING_SNAKE_CASE : str = get_size_dict(__SCREAMING_SNAKE_CASE ,default_to_square=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Any = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(__SCREAMING_SNAKE_CASE ,param_name='crop_size' )
SCREAMING_SNAKE_CASE : int = make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Tuple = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Any = [self.resize(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE : Dict = [self.center_crop(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : List[str] = [self.rescale(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Dict = [self.normalize(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) for image in images]
SCREAMING_SNAKE_CASE : Tuple = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = {'pixel_values': images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE ,tensor_type=__SCREAMING_SNAKE_CASE )
| 220 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
A = KandinskyVaaImgaImgPipeline
A = ['image_embeds', 'negative_image_embeds', 'image']
A = [
'image_embeds',
'negative_image_embeds',
'image',
]
A = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
A = False
@property
def __a ( self ):
return 32
@property
def __a ( self ):
return 32
@property
def __a ( self ):
return self.time_input_dim
@property
def __a ( self ):
return self.time_input_dim * 4
@property
def __a ( self ):
return 100
@property
def __a ( self ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def __a ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __a ( self ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def __a ( self ):
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_unet
SCREAMING_SNAKE_CASE : List[str] = self.dummy_movq
SCREAMING_SNAKE_CASE : int = {
'num_train_timesteps': 1000,
'beta_schedule': 'linear',
'beta_start': 0.0_0085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
SCREAMING_SNAKE_CASE : int = DDIMScheduler(**__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : str = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE=0 ):
SCREAMING_SNAKE_CASE : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
__SCREAMING_SNAKE_CASE )
# create init_image
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) ,rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0 ,2 ,3 ,1 )[0]
SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE ) ).convert('RGB' ).resize((256, 256) )
if str(__SCREAMING_SNAKE_CASE ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE : int = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : str = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def __a ( self ):
SCREAMING_SNAKE_CASE : Optional[Any] = 'cpu'
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Any = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Any = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE : Tuple = pipe(
**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) ,return_dict=__SCREAMING_SNAKE_CASE ,)[0]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Any = np.array(
[0.619_9778, 0.6398_4406, 0.4614_5785, 0.6294_4984, 0.562_2215, 0.4730_6132, 0.4744_1456, 0.460_7606, 0.4871_9263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def __a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ):
SCREAMING_SNAKE_CASE : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_img2img_frog.npy' )
SCREAMING_SNAKE_CASE : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
SCREAMING_SNAKE_CASE : Optional[Any] = 'A red cartoon frog, 4k'
SCREAMING_SNAKE_CASE : str = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.floataa )
pipe_prior.to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[Any] = KandinskyVaaImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' ,torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : str = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = pipe_prior(
__SCREAMING_SNAKE_CASE ,generator=__SCREAMING_SNAKE_CASE ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple()
SCREAMING_SNAKE_CASE : Optional[Any] = pipeline(
image=__SCREAMING_SNAKE_CASE ,image_embeds=__SCREAMING_SNAKE_CASE ,negative_image_embeds=__SCREAMING_SNAKE_CASE ,generator=__SCREAMING_SNAKE_CASE ,num_inference_steps=100 ,height=768 ,width=768 ,strength=0.2 ,output_type='np' ,)
SCREAMING_SNAKE_CASE : List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
| 220 | 1 |
def __lowerCamelCase ( UpperCAmelCase_ : list ):
"""simple docstring"""
a :Any = len(UpperCAmelCase_ )
for i in range(1 , UpperCAmelCase_ ):
a :Dict = collection[i]
a :List[Any] = 0
a :List[str] = i - 1
while low <= high:
a :Optional[Any] = (low + high) // 2
if val < collection[mid]:
a :Tuple = mid - 1
else:
a :Dict = mid + 1
for j in range(UpperCAmelCase_ , UpperCAmelCase_ , -1 ):
a :Optional[int] = collection[j - 1]
a :Dict = val
return collection
if __name__ == "__main__":
snake_case : str = input('''Enter numbers separated by a comma:\n''').strip()
snake_case : Dict = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
| 445 |
def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
"""simple docstring"""
while b:
a , a :int = b, a % b
return a
def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
"""simple docstring"""
return a if b == 0 else euclidean_gcd_recursive(UpperCAmelCase_ , a % b )
def __lowerCamelCase ( ):
"""simple docstring"""
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 445 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __snake_case( __A , unittest.TestCase ):
_A = VideoToVideoSDPipeline
_A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''} ) - {'''image''', '''width''', '''height'''}
_A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''} ) - {'''image'''}
_A = PipelineTesterMixin.required_optional_params - {'''latents'''}
_A = False
# No `output_type`.
_A = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def A ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
_SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A_ , set_alpha_to_one=A_ , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
_SCREAMING_SNAKE_CASE = CLIPTextModel(A_ )
_SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_SCREAMING_SNAKE_CASE = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def A ( self , A_ , A_=0 ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ )
if str(A_ ).startswith('''mps''' ):
_SCREAMING_SNAKE_CASE = torch.manual_seed(A_ )
else:
_SCREAMING_SNAKE_CASE = torch.Generator(device=A_ ).manual_seed(A_ )
_SCREAMING_SNAKE_CASE = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''video''': video,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = VideoToVideoSDPipeline(**A_ )
_SCREAMING_SNAKE_CASE = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
_SCREAMING_SNAKE_CASE = self.get_dummy_inputs(A_ )
_SCREAMING_SNAKE_CASE = '''np'''
_SCREAMING_SNAKE_CASE = sd_pipe(**A_ ).frames
_SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
_SCREAMING_SNAKE_CASE = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def A ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ , expected_max_diff=5e-3 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def A ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def A ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def A ( self ):
'''simple docstring'''
pass
def A ( self ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __snake_case( unittest.TestCase ):
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = VideoToVideoSDPipeline.from_pretrained('''cerspense/zeroscope_v2_XL''' , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
_SCREAMING_SNAKE_CASE = torch.Generator(device='''cpu''' ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = torch.randn((1, 10, 3, 1_024, 576) , generator=A_ )
_SCREAMING_SNAKE_CASE = video.to('''cuda''' )
_SCREAMING_SNAKE_CASE = '''Spiderman is surfing'''
_SCREAMING_SNAKE_CASE = pipe(A_ , video=A_ , generator=A_ , num_inference_steps=3 , output_type='''pt''' ).frames
_SCREAMING_SNAKE_CASE = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 168 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCamelCase : str = False
class __snake_case( unittest.TestCase ):
def A ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A ( self ):
'''simple docstring'''
return 12
@property
def A ( self ):
'''simple docstring'''
return 12
@property
def A ( self ):
'''simple docstring'''
return 32
@property
def A ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def A ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(A_ )
@property
def A ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = 12
_SCREAMING_SNAKE_CASE = 12
_SCREAMING_SNAKE_CASE = {
'''attention_bias''': True,
'''cross_attention_dim''': 32,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 32,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
_SCREAMING_SNAKE_CASE = TransformeraDModel(**A_ )
return model
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''cpu'''
_SCREAMING_SNAKE_CASE = self.dummy_vqvae
_SCREAMING_SNAKE_CASE = self.dummy_text_encoder
_SCREAMING_SNAKE_CASE = self.dummy_tokenizer
_SCREAMING_SNAKE_CASE = self.dummy_transformer
_SCREAMING_SNAKE_CASE = VQDiffusionScheduler(self.num_embed )
_SCREAMING_SNAKE_CASE = LearnedClassifierFreeSamplingEmbeddings(learnable=A_ )
_SCREAMING_SNAKE_CASE = VQDiffusionPipeline(
vqvae=A_ , text_encoder=A_ , tokenizer=A_ , transformer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , )
_SCREAMING_SNAKE_CASE = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_SCREAMING_SNAKE_CASE = '''teddy bear playing in the pool'''
_SCREAMING_SNAKE_CASE = torch.Generator(device=A_ ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe([prompt] , generator=A_ , num_inference_steps=2 , output_type='''np''' )
_SCREAMING_SNAKE_CASE = output.images
_SCREAMING_SNAKE_CASE = torch.Generator(device=A_ ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(
[prompt] , generator=A_ , output_type='''np''' , return_dict=A_ , num_inference_steps=2 )[0]
_SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
_SCREAMING_SNAKE_CASE = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''cpu'''
_SCREAMING_SNAKE_CASE = self.dummy_vqvae
_SCREAMING_SNAKE_CASE = self.dummy_text_encoder
_SCREAMING_SNAKE_CASE = self.dummy_tokenizer
_SCREAMING_SNAKE_CASE = self.dummy_transformer
_SCREAMING_SNAKE_CASE = VQDiffusionScheduler(self.num_embed )
_SCREAMING_SNAKE_CASE = LearnedClassifierFreeSamplingEmbeddings(
learnable=A_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
_SCREAMING_SNAKE_CASE = VQDiffusionPipeline(
vqvae=A_ , text_encoder=A_ , tokenizer=A_ , transformer=A_ , scheduler=A_ , learned_classifier_free_sampling_embeddings=A_ , )
_SCREAMING_SNAKE_CASE = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_SCREAMING_SNAKE_CASE = '''teddy bear playing in the pool'''
_SCREAMING_SNAKE_CASE = torch.Generator(device=A_ ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe([prompt] , generator=A_ , num_inference_steps=2 , output_type='''np''' )
_SCREAMING_SNAKE_CASE = output.images
_SCREAMING_SNAKE_CASE = torch.Generator(device=A_ ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(
[prompt] , generator=A_ , output_type='''np''' , return_dict=A_ , num_inference_steps=2 )[0]
_SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
_SCREAMING_SNAKE_CASE = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __snake_case( unittest.TestCase ):
def A ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
_SCREAMING_SNAKE_CASE = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
_SCREAMING_SNAKE_CASE = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
_SCREAMING_SNAKE_CASE = torch.Generator(device=A_ ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipeline(
'''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=A_ , output_type='''np''' , )
_SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 168 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=snake_case__ )
class a__( snake_case__ ):
a_ : str = field(default='''audio-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
a_ : ClassVar[Features] = Features({'''audio''': Audio()} )
a_ : ClassVar[Features] = Features({'''labels''': ClassLabel} )
a_ : str = "audio"
a_ : str = "labels"
def _lowercase ( self , _UpperCAmelCase ) -> Optional[Any]:
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , _UpperCAmelCase ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
snake_case__ =copy.deepcopy(self )
snake_case__ =self.label_schema.copy()
snake_case__ =features[self.label_column]
snake_case__ =label_schema
return task_template
@property
def _lowercase ( self ) -> Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 538 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class a__( snake_case__ ):
def __init__( self ) -> Dict:
snake_case__ =[]
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
self.events.append('on_init_end' )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> Dict:
self.events.append('on_train_begin' )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> Optional[Any]:
self.events.append('on_train_end' )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> int:
self.events.append('on_epoch_begin' )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
self.events.append('on_epoch_end' )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
self.events.append('on_step_begin' )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> str:
self.events.append('on_step_end' )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]:
self.events.append('on_evaluate' )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> Dict:
self.events.append('on_predict' )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> Any:
self.events.append('on_save' )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> Tuple:
self.events.append('on_log' )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]:
self.events.append('on_prediction_step' )
@require_torch
class a__( unittest.TestCase ):
def _lowercase ( self ) -> Optional[Any]:
snake_case__ =tempfile.mkdtemp()
def _lowercase ( self ) -> List[str]:
shutil.rmtree(self.output_dir )
def _lowercase ( self , _UpperCAmelCase=0 , _UpperCAmelCase=0 , _UpperCAmelCase=64 , _UpperCAmelCase=64 , _UpperCAmelCase=None , _UpperCAmelCase=False , **_UpperCAmelCase ) -> int:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
snake_case__ =RegressionDataset(length=_UpperCAmelCase )
snake_case__ =RegressionDataset(length=_UpperCAmelCase )
snake_case__ =RegressionModelConfig(a=_UpperCAmelCase , b=_UpperCAmelCase )
snake_case__ =RegressionPreTrainedModel(_UpperCAmelCase )
snake_case__ =TrainingArguments(self.output_dir , disable_tqdm=_UpperCAmelCase , report_to=[] , **_UpperCAmelCase )
return Trainer(
_UpperCAmelCase , _UpperCAmelCase , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , callbacks=_UpperCAmelCase , )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
# Order doesn't matter
snake_case__ =sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : cb.__name__ if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cb.__class__.__name__ )
snake_case__ =sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : cb.__name__ if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cb.__class__.__name__ )
for cba, cba in zip(_UpperCAmelCase , _UpperCAmelCase ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(_UpperCAmelCase , cba.__class__ )
elif not isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(cba.__class__ , _UpperCAmelCase )
else:
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowercase ( self , _UpperCAmelCase ) -> Any:
snake_case__ =['on_init_end', 'on_train_begin']
snake_case__ =0
snake_case__ =len(trainer.get_eval_dataloader() )
snake_case__ =['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(_UpperCAmelCase ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _lowercase ( self ) -> Optional[int]:
snake_case__ =self.get_trainer()
snake_case__ =DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
# Callbacks passed at init are added to the default callbacks
snake_case__ =self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(_UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
snake_case__ =self.get_trainer(disable_tqdm=_UpperCAmelCase )
snake_case__ =DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
def _lowercase ( self ) -> Optional[Any]:
snake_case__ =DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case__ =self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(_UpperCAmelCase )
expected_callbacks.remove(_UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
snake_case__ =self.get_trainer()
snake_case__ =trainer.pop_callback(_UpperCAmelCase )
self.assertEqual(cb.__class__ , _UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
trainer.add_callback(_UpperCAmelCase )
expected_callbacks.insert(0 , _UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
# We can also add, pop, or remove by instance
snake_case__ =self.get_trainer()
snake_case__ =trainer.callback_handler.callbacks[0]
trainer.remove_callback(_UpperCAmelCase )
expected_callbacks.remove(_UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
snake_case__ =self.get_trainer()
snake_case__ =trainer.callback_handler.callbacks[0]
snake_case__ =trainer.pop_callback(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
trainer.add_callback(_UpperCAmelCase )
expected_callbacks.insert(0 , _UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
def _lowercase ( self ) -> Dict:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=_UpperCAmelCase )
snake_case__ =self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case__ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCAmelCase , self.get_expected_events(_UpperCAmelCase ) )
# Independent log/save/eval
snake_case__ =self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
snake_case__ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCAmelCase , self.get_expected_events(_UpperCAmelCase ) )
snake_case__ =self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
snake_case__ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCAmelCase , self.get_expected_events(_UpperCAmelCase ) )
snake_case__ =self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
snake_case__ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCAmelCase , self.get_expected_events(_UpperCAmelCase ) )
snake_case__ =self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
snake_case__ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCAmelCase , self.get_expected_events(_UpperCAmelCase ) )
# A bit of everything
snake_case__ =self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
snake_case__ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCAmelCase , self.get_expected_events(_UpperCAmelCase ) )
# warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
snake_case__ =self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(_UpperCAmelCase ) in warn_mock.call_args[0][0]
| 538 | 1 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def A (__A : Optional[Any] , __A : Dict , __A : int , __A : str , __A : Optional[int] ) -> str:
"""simple docstring"""
with open(__A ) as metadata_file:
UpperCAmelCase_ = json.load(__A )
UpperCAmelCase_ = LukeConfig(use_entity_aware_attention=__A , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
UpperCAmelCase_ = torch.load(__A , map_location='''cpu''' )['''module''']
# Load the entity vocab file
UpperCAmelCase_ = load_original_entity_vocab(__A )
# add an entry for [MASK2]
UpperCAmelCase_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
UpperCAmelCase_ = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
UpperCAmelCase_ = AddedToken('''<ent>''' , lstrip=__A , rstrip=__A )
UpperCAmelCase_ = AddedToken('''<ent2>''' , lstrip=__A , rstrip=__A )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__A )
with open(os.path.join(__A , '''tokenizer_config.json''' ) , '''r''' ) as f:
UpperCAmelCase_ = json.load(__A )
UpperCAmelCase_ = '''MLukeTokenizer'''
with open(os.path.join(__A , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(__A , __A )
with open(os.path.join(__A , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(__A , __A )
UpperCAmelCase_ = MLukeTokenizer.from_pretrained(__A )
# Initialize the embeddings of the special tokens
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
UpperCAmelCase_ = state_dict['''embeddings.word_embeddings.weight''']
UpperCAmelCase_ = word_emb[ent_init_index].unsqueeze(0 )
UpperCAmelCase_ = word_emb[enta_init_index].unsqueeze(0 )
UpperCAmelCase_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
UpperCAmelCase_ = state_dict[bias_name]
UpperCAmelCase_ = decoder_bias[ent_init_index].unsqueeze(0 )
UpperCAmelCase_ = decoder_bias[enta_init_index].unsqueeze(0 )
UpperCAmelCase_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
UpperCAmelCase_ = F"""encoder.layer.{layer_index}.attention.self."""
UpperCAmelCase_ = state_dict[prefix + matrix_name]
UpperCAmelCase_ = state_dict[prefix + matrix_name]
UpperCAmelCase_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
UpperCAmelCase_ = state_dict['''entity_embeddings.entity_embeddings.weight''']
UpperCAmelCase_ = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
UpperCAmelCase_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
UpperCAmelCase_ = state_dict['''entity_predictions.bias''']
UpperCAmelCase_ = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
UpperCAmelCase_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
UpperCAmelCase_ = LukeForMaskedLM(config=__A ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
UpperCAmelCase_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
UpperCAmelCase_ = state_dict[key]
else:
UpperCAmelCase_ = state_dict[key]
UpperCAmelCase_ , UpperCAmelCase_ = model.load_state_dict(__A , strict=__A )
if set(__A ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(__A ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
UpperCAmelCase_ = MLukeTokenizer.from_pretrained(__A , task='''entity_classification''' )
UpperCAmelCase_ = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
UpperCAmelCase_ = (0, 9)
UpperCAmelCase_ = tokenizer(__A , entity_spans=[span] , return_tensors='''pt''' )
UpperCAmelCase_ = model(**__A )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCAmelCase_ = torch.Size((1, 33, 768) )
UpperCAmelCase_ = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCAmelCase_ = torch.Size((1, 1, 768) )
UpperCAmelCase_ = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
UpperCAmelCase_ = MLukeTokenizer.from_pretrained(__A )
UpperCAmelCase_ = '''Tokyo is the capital of <mask>.'''
UpperCAmelCase_ = (24, 30)
UpperCAmelCase_ = tokenizer(__A , entity_spans=[span] , return_tensors='''pt''' )
UpperCAmelCase_ = model(**__A )
UpperCAmelCase_ = encoding['''input_ids'''][0].tolist()
UpperCAmelCase_ = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
UpperCAmelCase_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__A )
UpperCAmelCase_ = outputs.entity_logits[0][0].argmax().item()
UpperCAmelCase_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(__A ) )
model.save_pretrained(__A )
def A (__A : List[str] ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
UpperCAmelCase_ = [json.loads(__A ) for line in open(__A )]
UpperCAmelCase_ = {}
for entry in data:
UpperCAmelCase_ = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
UpperCAmelCase_ = entity_id
break
UpperCAmelCase_ = F"""{language}:{entity_name}"""
UpperCAmelCase_ = entity_id
return new_mapping
if __name__ == "__main__":
snake_case_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
snake_case_ : List[str] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 169 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class __snake_case :
def __init__( self : Dict , _snake_case : Optional[int] , _snake_case : int , _snake_case : int):
"""simple docstring"""
if dst_width < 0 or dst_height < 0:
raise ValueError('''Destination width/height should be > 0''')
UpperCAmelCase_ = img
UpperCAmelCase_ = img.shape[1]
UpperCAmelCase_ = img.shape[0]
UpperCAmelCase_ = dst_width
UpperCAmelCase_ = dst_height
UpperCAmelCase_ = self.src_w / self.dst_w
UpperCAmelCase_ = self.src_h / self.dst_h
UpperCAmelCase_ = UpperCAmelCase_ = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta) * 255
)
def lowerCamelCase ( self : str):
"""simple docstring"""
for i in range(self.dst_h):
for j in range(self.dst_w):
UpperCAmelCase_ = self.img[self.get_y(_snake_case)][self.get_x(_snake_case)]
def lowerCamelCase ( self : int , _snake_case : int):
"""simple docstring"""
return int(self.ratio_x * x)
def lowerCamelCase ( self : List[str] , _snake_case : int):
"""simple docstring"""
return int(self.ratio_y * y)
if __name__ == "__main__":
snake_case_ , snake_case_ : List[Any] = 800, 600
snake_case_ : Optional[Any] = imread("image_data/lena.jpg", 1)
snake_case_ : Any = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
)
waitKey(0)
destroyAllWindows()
| 169 | 1 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
__lowerCamelCase = float('''nan''')
class A__ :
def __init__( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = sys.stdout
A_ = open(UpperCamelCase__ , """a""" )
def __getattr__( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return getattr(self.stdout , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
self.stdout.write(UpperCamelCase__ )
# strip tqdm codes
self.file.write(re.sub(R"""^.*\r""" , """""" , UpperCamelCase__ , 0 , re.M ) )
def UpperCAmelCase__ ( UpperCAmelCase__=80, UpperCAmelCase__=False ) -> Union[str, Any]:
A_ = []
# deal with critical env vars
A_ = ["""CUDA_VISIBLE_DEVICES"""]
for key in env_keys:
A_ = os.environ.get(UpperCAmelCase__, UpperCAmelCase__ )
if val is not None:
cmd.append(F'''{key}={val}''' )
# python executable (not always needed if the script is executable)
A_ = sys.executable if full_python_path else sys.executable.split("""/""" )[-1]
cmd.append(UpperCAmelCase__ )
# now the normal args
cmd += list(map(shlex.quote, sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
A_ = []
A_ = """"""
while len(UpperCAmelCase__ ) > 0:
current_line += F'''{cmd.pop(0 )} '''
if len(UpperCAmelCase__ ) == 0 or len(UpperCAmelCase__ ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(UpperCAmelCase__ )
A_ = """"""
return "\\\n".join(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Any:
# unwrap multi-line input
A_ = re.sub(r"""[\\\n]+""", """ """, args.base_cmd )
# remove --output_dir if any and set our own
A_ = re.sub("""--output_dir\s+[^\s]+""", """""", args.base_cmd )
args.base_cmd += F''' --output_dir {output_dir}'''
# ensure we have --overwrite_output_dir
A_ = re.sub("""--overwrite_output_dir\s+""", """""", args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[str]:
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0, 1_00 ) for k in metric_keys}, **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6_666, 222.22_222_222] )}, )
A_ = subprocess.run(UpperCAmelCase__, capture_output=UpperCAmelCase__, text=UpperCAmelCase__ )
if verbose:
print("""STDOUT""", result.stdout )
print("""STDERR""", result.stderr )
# save the streams
A_ = variation.replace(""" """, """-""" )
with open(Path(UpperCAmelCase__ ) / F'''log.{prefix}.stdout.txt''', """w""" ) as f:
f.write(result.stdout )
with open(Path(UpperCAmelCase__ ) / F'''log.{prefix}.stderr.txt''', """w""" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("""failed""" )
return {target_metric_key: nan}
with io.open(F'''{output_dir}/all_results.json''', """r""", encoding="""utf-8""" ) as f:
A_ = json.load(UpperCAmelCase__ )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) -> str:
A_ = []
A_ = []
A_ = F'''{id}: {variation:<{longest_variation_len}}'''
A_ = F'''{preamble}: '''
A_ = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(UpperCAmelCase__ ), desc=UpperCAmelCase__, leave=UpperCAmelCase__ ):
A_ = process_run_single(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
A_ = single_run_metrics[target_metric_key]
if not math.isnan(UpperCAmelCase__ ):
metrics.append(UpperCAmelCase__ )
results.append(UpperCAmelCase__ )
outcome += "✓"
else:
outcome += "✘"
A_ = F'''\33[2K\r{outcome}'''
if len(UpperCAmelCase__ ) > 0:
A_ = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
A_ = round(mean_metrics[target_metric_key], 2 )
A_ = F'''{outcome} {mean_target}'''
if len(UpperCAmelCase__ ) > 1:
results_str += F''' {tuple(round(UpperCAmelCase__, 2 ) for x in results )}'''
print(UpperCAmelCase__ )
A_ = variation
return mean_metrics
else:
print(UpperCAmelCase__ )
return {variation_key: variation, target_metric_key: nan}
def UpperCAmelCase__ ( ) -> Any:
A_ = torch.cuda.get_device_properties(torch.device("""cuda""" ) )
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> str:
A_ = pd.DataFrame(UpperCAmelCase__ )
A_ = """variation"""
A_ = """diff_%"""
A_ = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
A_ = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(UpperCAmelCase__ ):
# as a fallback, use the minimal value as the sentinel
A_ = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(UpperCAmelCase__ ):
A_ = df.apply(
lambda UpperCAmelCase__ : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0, axis="""columns""", )
# re-order columns
A_ = [variation_key, target_metric_key, diff_key, *report_metric_keys]
A_ = df.reindex(UpperCAmelCase__, axis="""columns""" ) # reorder cols
# capitalize
A_ = df.rename(str.capitalize, axis="""columns""" )
# make the cols as narrow as possible
A_ = df.rename(lambda UpperCAmelCase__ : c.replace("""_""", """<br>""" ), axis="""columns""" )
A_ = df.rename(lambda UpperCAmelCase__ : c.replace("""_""", """\n""" ), axis="""columns""" )
A_ = ["""""", """Copy between the cut-here-lines and paste as is to github or a forum"""]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=UpperCAmelCase__, floatfmt=""".2f""" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=UpperCAmelCase__, floatfmt=""".2f""" )]
print("""\n\n""".join(UpperCAmelCase__ ) )
def UpperCAmelCase__ ( ) -> Union[str, Any]:
A_ = argparse.ArgumentParser()
parser.add_argument(
"""--base-cmd""", default=UpperCAmelCase__, type=UpperCAmelCase__, required=UpperCAmelCase__, help="""Base cmd""", )
parser.add_argument(
"""--variations""", default=UpperCAmelCase__, type=UpperCAmelCase__, nargs="""+""", required=UpperCAmelCase__, help="""Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'""", )
parser.add_argument(
"""--base-variation""", default=UpperCAmelCase__, type=UpperCAmelCase__, help="""Baseline variation to compare to. if None the minimal target value will be used to compare against""", )
parser.add_argument(
"""--target-metric-key""", default=UpperCAmelCase__, type=UpperCAmelCase__, required=UpperCAmelCase__, help="""Target metric key in output_dir/all_results.json, e.g., train_samples_per_second""", )
parser.add_argument(
"""--report-metric-keys""", default="""""", type=UpperCAmelCase__, help="""Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples""", )
parser.add_argument(
"""--repeat-times""", default=1, type=UpperCAmelCase__, help="""How many times to re-run each variation - an average will be reported""", )
parser.add_argument(
"""--output_dir""", default="""output_benchmark""", type=UpperCAmelCase__, help="""The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked""", )
parser.add_argument(
"""--verbose""", default=UpperCAmelCase__, action="""store_true""", help="""Whether to show the outputs of each run or just the benchmark progress""", )
A_ = parser.parse_args()
A_ = args.output_dir
Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
A_ = get_base_command(UpperCAmelCase__, UpperCAmelCase__ )
# split each dimension into its --foo variations
A_ = [list(map(str.strip, re.split(r"""\|""", UpperCAmelCase__ ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
A_ = list(map(str.strip, map(""" """.join, itertools.product(*UpperCAmelCase__ ) ) ) )
A_ = max(len(UpperCAmelCase__ ) for x in variations )
# split wanted keys
A_ = args.report_metric_keys.split()
# capture prints into a log file for convenience
A_ = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
print(F'''and this script\'s output is also piped into {report_fn}''' )
A_ = Tee(UpperCAmelCase__ )
print(F'''\n*** Running {len(UpperCAmelCase__ )} benchmarks:''' )
print(F'''Base command: {" ".join(UpperCAmelCase__ )}''' )
A_ = """variation"""
A_ = []
for id, variation in enumerate(tqdm(UpperCAmelCase__, desc="""Total completion: """, leave=UpperCAmelCase__ ) ):
A_ = base_cmd + variation.split()
results.append(
process_run(
id + 1, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, args.target_metric_key, UpperCAmelCase__, args.repeat_times, UpperCAmelCase__, args.verbose, ) )
process_results(UpperCAmelCase__, args.target_metric_key, UpperCAmelCase__, args.base_variation, UpperCAmelCase__ )
if __name__ == "__main__":
main()
| 288 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class A__ ( _snake_case ):
lowercase = "codegen"
lowercase = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , UpperCamelCase__=50400 , UpperCamelCase__=2048 , UpperCamelCase__=2048 , UpperCamelCase__=4096 , UpperCamelCase__=28 , UpperCamelCase__=16 , UpperCamelCase__=64 , UpperCamelCase__=None , UpperCamelCase__="gelu_new" , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=1e-5 , UpperCamelCase__=0.02 , UpperCamelCase__=True , UpperCamelCase__=50256 , UpperCamelCase__=50256 , UpperCamelCase__=False , **UpperCamelCase__ , ) -> Tuple:
'''simple docstring'''
A_ = vocab_size
A_ = n_ctx
A_ = n_positions
A_ = n_embd
A_ = n_layer
A_ = n_head
A_ = n_inner
A_ = rotary_dim
A_ = activation_function
A_ = resid_pdrop
A_ = embd_pdrop
A_ = attn_pdrop
A_ = layer_norm_epsilon
A_ = initializer_range
A_ = use_cache
A_ = bos_token_id
A_ = eos_token_id
super().__init__(
bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , tie_word_embeddings=UpperCamelCase__ , **UpperCamelCase__ )
class A__ ( _snake_case ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ = "default" , UpperCamelCase__ = None , UpperCamelCase__ = False , ) -> Tuple:
'''simple docstring'''
super().__init__(UpperCamelCase__ , task=UpperCamelCase__ , patching_specs=UpperCamelCase__ , use_past=UpperCamelCase__ )
if not getattr(self._config , """pad_token_id""" , UpperCamelCase__ ):
# TODO: how to do that better?
A_ = 0
@property
def snake_case_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
A_ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction="""inputs""" )
A_ = {0: """batch""", 1: """past_sequence + sequence"""}
else:
A_ = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return self._config.n_layer
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return self._config.n_head
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , ) -> Mapping[str, Any]:
'''simple docstring'''
A_ = super(UpperCamelCase__ , self ).generate_dummy_inputs(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
# We need to order the input in the way they appears in the forward()
A_ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
A_ , A_ = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
A_ = seqlen + 2
A_ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
A_ = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(self.num_layers )
]
A_ = common_inputs["""attention_mask"""]
if self.use_past:
A_ = ordered_inputs["""attention_mask"""].dtype
A_ = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
return ordered_inputs
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return 13
| 288 | 1 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class UpperCamelCase :
def __init__(self , __UpperCamelCase = "cpu" , __UpperCamelCase = "openai/clip-vit-large-patch14" ) -> None:
UpperCamelCase_ : Tuple = device
UpperCamelCase_ : List[Any] = CLIPTokenizerFast.from_pretrained(__UpperCamelCase )
UpperCamelCase_ : Dict = [0.48_145_466, 0.4_578_275, 0.40_821_073]
UpperCamelCase_ : Optional[Any] = [0.26_862_954, 0.26_130_258, 0.27_577_711]
UpperCamelCase_ : Optional[Any] = torchvision.transforms.Normalize(self.image_mean , self.image_std )
UpperCamelCase_ : Union[str, Any] = torchvision.transforms.Resize(224 )
UpperCamelCase_ : Tuple = torchvision.transforms.CenterCrop(224 )
def A_ (self , __UpperCamelCase ) -> Union[str, Any]:
UpperCamelCase_ : Optional[int] = self.resize(__UpperCamelCase )
UpperCamelCase_ : Tuple = self.center_crop(__UpperCamelCase )
UpperCamelCase_ : Optional[int] = self.normalize(__UpperCamelCase )
return images
def __call__(self , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase ) -> List[Any]:
UpperCamelCase_ : Optional[int] = self.tokenizer(text=__UpperCamelCase , **__UpperCamelCase )
UpperCamelCase_ : List[str] = self.preprocess_img(__UpperCamelCase )
UpperCamelCase_ : Any = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class UpperCamelCase ( nn.Module ):
def __init__(self , __UpperCamelCase=10 , __UpperCamelCase=0.01 , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase="image" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=False , ) -> None:
super().__init__()
UpperCamelCase_ : int = None
UpperCamelCase_ : Any = device if device else get_device()
if vqgan:
UpperCamelCase_ : int = vqgan
else:
UpperCamelCase_ : int = load_vqgan(self.device , conf_path=__UpperCamelCase , ckpt_path=__UpperCamelCase )
self.vqgan.eval()
if clip:
UpperCamelCase_ : Optional[Any] = clip
else:
UpperCamelCase_ : Optional[Any] = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
UpperCamelCase_ : Optional[Any] = ProcessorGradientFlow(device=self.device )
UpperCamelCase_ : int = iterations
UpperCamelCase_ : str = lr
UpperCamelCase_ : List[str] = log
UpperCamelCase_ : Optional[int] = make_grid
UpperCamelCase_ : Union[str, Any] = return_val
UpperCamelCase_ : str = quantize
UpperCamelCase_ : List[str] = self.vqgan.decoder.z_shape
def A_ (self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=5 , __UpperCamelCase=True ) -> str:
UpperCamelCase_ : List[Any] = []
if output_path is None:
UpperCamelCase_ : Any = """./animation.gif"""
if input_path is None:
UpperCamelCase_ : Optional[int] = self.save_path
UpperCamelCase_ : Tuple = sorted(glob(input_path + """/*""" ) )
if not len(__UpperCamelCase ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(__UpperCamelCase ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
UpperCamelCase_ : int = total_duration / len(__UpperCamelCase )
UpperCamelCase_ : List[Any] = [frame_duration] * len(__UpperCamelCase )
if extend_frames:
UpperCamelCase_ : Tuple = 1.5
UpperCamelCase_ : Union[str, Any] = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(__UpperCamelCase ) )
imageio.mimsave(__UpperCamelCase , __UpperCamelCase , duration=__UpperCamelCase )
print(f'''gif saved to {output_path}''' )
def A_ (self , __UpperCamelCase=None , __UpperCamelCase=None ) -> Tuple:
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
UpperCamelCase_ : Optional[int] = preprocess(Image.open(__UpperCamelCase ) , target_image_size=256 ).to(self.device )
UpperCamelCase_ : Tuple = preprocess_vqgan(__UpperCamelCase )
UpperCamelCase_,*UpperCamelCase_ : Optional[Any] = self.vqgan.encode(__UpperCamelCase )
return z
def A_ (self , __UpperCamelCase ) -> List[str]:
UpperCamelCase_ : Dict = self.latent.detach().requires_grad_()
UpperCamelCase_ : List[str] = base_latent + transform_vector
if self.quantize:
UpperCamelCase_,*UpperCamelCase_ : Dict = self.vqgan.quantize(__UpperCamelCase )
else:
UpperCamelCase_ : Tuple = trans_latent
return self.vqgan.decode(__UpperCamelCase )
def A_ (self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None ) -> Dict:
UpperCamelCase_ : str = self.clip_preprocessor(text=__UpperCamelCase , images=__UpperCamelCase , return_tensors="""pt""" , padding=__UpperCamelCase )
UpperCamelCase_ : Any = self.clip(**__UpperCamelCase )
UpperCamelCase_ : Tuple = clip_outputs.logits_per_image
if weights is not None:
UpperCamelCase_ : Optional[int] = similarity_logits * weights
return similarity_logits.sum()
def A_ (self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict:
UpperCamelCase_ : Optional[Any] = self._get_clip_similarity(pos_prompts["""prompts"""] , __UpperCamelCase , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
UpperCamelCase_ : List[str] = self._get_clip_similarity(neg_prompts["""prompts"""] , __UpperCamelCase , weights=neg_prompts["""weights"""] )
else:
UpperCamelCase_ : List[str] = torch.tensor([1] , device=self.device )
UpperCamelCase_ : Optional[int] = -torch.log(__UpperCamelCase ) + torch.log(__UpperCamelCase )
return loss
def A_ (self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]:
UpperCamelCase_ : List[Any] = torch.randn_like(self.latent , requires_grad=__UpperCamelCase , device=self.device )
UpperCamelCase_ : Dict = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
UpperCamelCase_ : List[Any] = self._add_vector(__UpperCamelCase )
UpperCamelCase_ : Optional[Any] = loop_post_process(__UpperCamelCase )
UpperCamelCase_ : Dict = self._get_CLIP_loss(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
print("""CLIP loss""" , __UpperCamelCase )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=__UpperCamelCase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def A_ (self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
wandb.init(reinit=__UpperCamelCase , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
UpperCamelCase_ : Union[str, Any] = Image.open(__UpperCamelCase )
UpperCamelCase_ : Union[str, Any] = image.resize((256, 256) )
wandb.log("""Original Image""" , wandb.Image(__UpperCamelCase ) )
def A_ (self , __UpperCamelCase ) -> List[Any]:
if not prompts:
return []
UpperCamelCase_ : Optional[Any] = []
UpperCamelCase_ : int = []
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ : Optional[int] = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(__UpperCamelCase , (tuple, list) ):
UpperCamelCase_ : Any = prompt[0]
UpperCamelCase_ : List[str] = float(prompt[1] )
elif ":" in prompt:
UpperCamelCase_,UpperCamelCase_ : int = prompt.split(""":""" )
UpperCamelCase_ : List[str] = float(__UpperCamelCase )
else:
UpperCamelCase_ : Dict = prompt
UpperCamelCase_ : Dict = 1.0
processed_prompts.append(__UpperCamelCase )
weights.append(__UpperCamelCase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__UpperCamelCase , device=self.device ),
}
def A_ (self , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=None , ) -> Dict:
if image_path:
UpperCamelCase_ : Union[str, Any] = self._get_latent(__UpperCamelCase )
else:
UpperCamelCase_ : Optional[Any] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
assert pos_prompts, "You must provide at least one positive prompt."
UpperCamelCase_ : Union[str, Any] = self.process_prompts(__UpperCamelCase )
UpperCamelCase_ : Optional[int] = self.process_prompts(__UpperCamelCase )
if save_final and save_path is None:
UpperCamelCase_ : Optional[int] = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(__UpperCamelCase ):
os.makedirs(__UpperCamelCase )
else:
UpperCamelCase_ : List[str] = save_path + """_""" + get_timestamp()
os.makedirs(__UpperCamelCase )
UpperCamelCase_ : List[str] = save_path
UpperCamelCase_ : Optional[int] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(__UpperCamelCase ) )
UpperCamelCase_ : Optional[int] = loop_post_process(__UpperCamelCase )
for iter, transformed_img in enumerate(self._optimize_CLIP(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) ):
if show_intermediate:
show_pil(__UpperCamelCase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(__UpperCamelCase )} )
if show_final:
show_pil(__UpperCamelCase )
if save_final:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}_final.png''' ) )
| 138 | import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase ( __a ):
def __init__(self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Optional[int]:
super().__init__()
self.register_modules(
vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , )
def A_ (self , __UpperCamelCase = "auto" ) -> int:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase_ : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def A_ (self ) -> Union[str, Any]:
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def __call__(self , __UpperCamelCase , __UpperCamelCase = 512 , __UpperCamelCase = 512 , __UpperCamelCase = 50 , __UpperCamelCase = 7.5 , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = 0.0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = None , **__UpperCamelCase , ) -> List[Any]:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ : Tuple = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ : Optional[int] = len(__UpperCamelCase )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__UpperCamelCase )}.''' )
# get prompt text embeddings
UpperCamelCase_ : Tuple = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCamelCase_ : Tuple = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase_ : Dict = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCamelCase_ : Optional[Any] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCamelCase_ : Tuple = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase_,UpperCamelCase_,UpperCamelCase_ : int = text_embeddings.shape
UpperCamelCase_ : Union[str, Any] = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
UpperCamelCase_ : Optional[int] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase_ : Optional[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase_ : List[str]
if negative_prompt is None:
UpperCamelCase_ : Union[str, Any] = [""""""]
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !='''
f''' {type(__UpperCamelCase )}.''' )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ : List[Any] = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
UpperCamelCase_ : Dict = negative_prompt
UpperCamelCase_ : Optional[Any] = text_input_ids.shape[-1]
UpperCamelCase_ : List[str] = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , )
UpperCamelCase_ : Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase_ : int = uncond_embeddings.shape[1]
UpperCamelCase_ : List[str] = uncond_embeddings.repeat(__UpperCamelCase , __UpperCamelCase , 1 )
UpperCamelCase_ : List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase_ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase_ : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase_ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
UpperCamelCase_ : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase_ : int = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(self.device )
UpperCamelCase_ : Tuple = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(
self.device )
else:
UpperCamelCase_ : Optional[Any] = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
UpperCamelCase_ : Optional[Any] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCamelCase_ : Union[str, Any] = latents_reference.to(self.device )
UpperCamelCase_ : Any = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
UpperCamelCase_ : int = (latents_shape[3] - latents_shape_reference[3]) // 2
UpperCamelCase_ : Union[str, Any] = (latents_shape[2] - latents_shape_reference[2]) // 2
UpperCamelCase_ : List[Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
UpperCamelCase_ : List[str] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
UpperCamelCase_ : str = 0 if dx < 0 else dx
UpperCamelCase_ : List[str] = 0 if dy < 0 else dy
UpperCamelCase_ : Dict = max(-dx , 0 )
UpperCamelCase_ : str = max(-dy , 0 )
# import pdb
# pdb.set_trace()
UpperCamelCase_ : str = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCamelCase_ : Any = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase_ : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase_ : str = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase_ : Any = {}
if accepts_eta:
UpperCamelCase_ : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase_ : str = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
UpperCamelCase_ : str = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase_,UpperCamelCase_ : Any = noise_pred.chunk(2 )
UpperCamelCase_ : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase_ : int = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ : Optional[int] = 1 / 0.18_215 * latents
UpperCamelCase_ : List[str] = self.vae.decode(__UpperCamelCase ).sample
UpperCamelCase_ : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
UpperCamelCase_ : List[Any] = self.feature_extractor(self.numpy_to_pil(__UpperCamelCase ) , return_tensors="""pt""" ).to(
self.device )
UpperCamelCase_,UpperCamelCase_ : Optional[Any] = self.safety_checker(
images=__UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
UpperCamelCase_ : Tuple = None
if output_type == "pil":
UpperCamelCase_ : Any = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
| 138 | 1 |
'''simple docstring'''
from math import isqrt
def _lowercase ( lowerCamelCase__ ) -> bool:
"""simple docstring"""
return all(number % divisor != 0 for divisor in range(2 , isqrt(lowerCamelCase__ ) + 1 ) )
def _lowercase ( lowerCamelCase__ = 10**6 ) -> int:
"""simple docstring"""
__UpperCAmelCase : str = 0
__UpperCAmelCase : List[Any] = 1
__UpperCAmelCase : str = 7
while prime_candidate < max_prime:
primes_count += is_prime(lowerCamelCase__ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 168 | '''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __A (__magic_name__ ):
def __get__( self , UpperCamelCase_ , UpperCamelCase_=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute" )
__UpperCAmelCase : List[str] = "__cached_" + self.fget.__name__
__UpperCAmelCase : Optional[int] = getattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if cached is None:
__UpperCAmelCase : List[str] = self.fget(UpperCamelCase_ )
setattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return cached
def _lowercase ( lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f"""invalid truth value {val!r}""" )
def _lowercase ( lowerCamelCase__ ) -> Any:
"""simple docstring"""
if is_torch_fx_proxy(lowerCamelCase__ ):
return True
if is_torch_available():
import torch
if isinstance(lowerCamelCase__ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowerCamelCase__ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowerCamelCase__ , (jnp.ndarray, Tracer) ):
return True
return isinstance(lowerCamelCase__ , np.ndarray )
def _lowercase ( lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
return isinstance(lowerCamelCase__ , np.ndarray )
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
return _is_numpy(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
import torch
return isinstance(lowerCamelCase__ , torch.Tensor )
def _lowercase ( lowerCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
return False if not is_torch_available() else _is_torch(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
import torch
return isinstance(lowerCamelCase__ , torch.device )
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
return False if not is_torch_available() else _is_torch_device(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
import torch
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
if hasattr(lowerCamelCase__ , lowerCamelCase__ ):
__UpperCAmelCase : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ )
else:
return False
return isinstance(lowerCamelCase__ , torch.dtype )
def _lowercase ( lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
return False if not is_torch_available() else _is_torch_dtype(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
import tensorflow as tf
return isinstance(lowerCamelCase__ , tf.Tensor )
def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
return False if not is_tf_available() else _is_tensorflow(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowerCamelCase__ , "is_symbolic_tensor" ):
return tf.is_symbolic_tensor(lowerCamelCase__ )
return type(lowerCamelCase__ ) == tf.Tensor
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
import jax.numpy as jnp # noqa: F811
return isinstance(lowerCamelCase__ , jnp.ndarray )
def _lowercase ( lowerCamelCase__ ) -> Dict:
"""simple docstring"""
return False if not is_flax_available() else _is_jax(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
if isinstance(lowerCamelCase__ , (dict, UserDict) ):
return {k: to_py_obj(lowerCamelCase__ ) for k, v in obj.items()}
elif isinstance(lowerCamelCase__ , (list, tuple) ):
return [to_py_obj(lowerCamelCase__ ) for o in obj]
elif is_tf_tensor(lowerCamelCase__ ):
return obj.numpy().tolist()
elif is_torch_tensor(lowerCamelCase__ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(lowerCamelCase__ ):
return np.asarray(lowerCamelCase__ ).tolist()
elif isinstance(lowerCamelCase__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def _lowercase ( lowerCamelCase__ ) -> str:
"""simple docstring"""
if isinstance(lowerCamelCase__ , (dict, UserDict) ):
return {k: to_numpy(lowerCamelCase__ ) for k, v in obj.items()}
elif isinstance(lowerCamelCase__ , (list, tuple) ):
return np.array(lowerCamelCase__ )
elif is_tf_tensor(lowerCamelCase__ ):
return obj.numpy()
elif is_torch_tensor(lowerCamelCase__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowerCamelCase__ ):
return np.asarray(lowerCamelCase__ )
else:
return obj
class __A (__magic_name__ ):
def _snake_case ( self ):
__UpperCAmelCase : Any = fields(self )
# Safety and consistency checks
if not len(UpperCamelCase_ ):
raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )
__UpperCAmelCase : Dict = getattr(self , class_fields[0].name )
__UpperCAmelCase : Union[str, Any] = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(UpperCamelCase_ ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : str = first_field.items()
__UpperCAmelCase : Union[str, Any] = True
else:
try:
__UpperCAmelCase : Optional[int] = iter(UpperCamelCase_ )
__UpperCAmelCase : Dict = True
except TypeError:
__UpperCAmelCase : Union[str, Any] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(UpperCamelCase_ ):
if (
not isinstance(UpperCamelCase_ , (list, tuple) )
or not len(UpperCamelCase_ ) == 2
or not isinstance(element[0] , UpperCamelCase_ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
__UpperCAmelCase : Union[str, Any] = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
__UpperCAmelCase : List[str] = element[1]
elif first_field is not None:
__UpperCAmelCase : Optional[int] = first_field
else:
for field in class_fields:
__UpperCAmelCase : Any = getattr(self , field.name )
if v is not None:
__UpperCAmelCase : Union[str, Any] = v
def __delitem__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
def __getitem__( self , UpperCamelCase_ ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self , UpperCamelCase_ , UpperCamelCase_ ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(UpperCamelCase_ , UpperCamelCase_ )
super().__setattr__(UpperCamelCase_ , UpperCamelCase_ )
def __setitem__( self , UpperCamelCase_ , UpperCamelCase_ ):
# Will raise a KeyException if needed
super().__setitem__(UpperCamelCase_ , UpperCamelCase_ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self ):
return tuple(self[k] for k in self.keys() )
class __A (__magic_name__ , __magic_name__ ):
@classmethod
def _snake_case ( cls , UpperCamelCase_ ):
raise ValueError(
f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" )
class __A (__magic_name__ ):
snake_case :Dict = "longest"
snake_case :Dict = "max_length"
snake_case :Union[str, Any] = "do_not_pad"
class __A (__magic_name__ ):
snake_case :Union[str, Any] = "pt"
snake_case :List[str] = "tf"
snake_case :Any = "np"
snake_case :Union[str, Any] = "jax"
class __A :
def __init__( self , UpperCamelCase_ ):
__UpperCAmelCase : Dict = context_managers
__UpperCAmelCase : str = ExitStack()
def __enter__( self ):
for context_manager in self.context_managers:
self.stack.enter_context(UpperCamelCase_ )
def __exit__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
self.stack.__exit__(*UpperCamelCase_ , **UpperCamelCase_ )
def _lowercase ( lowerCamelCase__ ) -> Any:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = infer_framework(lowerCamelCase__ )
if framework == "tf":
__UpperCAmelCase : Any = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__UpperCAmelCase : List[str] = inspect.signature(model_class.forward ) # PyTorch models
else:
__UpperCAmelCase : List[str] = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def _lowercase ( lowerCamelCase__ ) -> Any:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = model_class.__name__
__UpperCAmelCase : List[str] = infer_framework(lowerCamelCase__ )
if framework == "tf":
__UpperCAmelCase : Optional[Any] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__UpperCAmelCase : Tuple = inspect.signature(model_class.forward ) # PyTorch models
else:
__UpperCAmelCase : List[Any] = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ = "" , lowerCamelCase__ = "." ) -> Optional[Any]:
"""simple docstring"""
def _flatten_dict(lowerCamelCase__ , lowerCamelCase__="" , lowerCamelCase__="." ):
for k, v in d.items():
__UpperCAmelCase : Union[str, Any] = str(lowerCamelCase__ ) + delimiter + str(lowerCamelCase__ ) if parent_key else k
if v and isinstance(lowerCamelCase__ , lowerCamelCase__ ):
yield from flatten_dict(lowerCamelCase__ , lowerCamelCase__ , delimiter=lowerCamelCase__ ).items()
else:
yield key, v
return dict(_flatten_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) )
@contextmanager
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ = False ) -> Union[str, Any]:
"""simple docstring"""
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def _lowercase ( lowerCamelCase__ , lowerCamelCase__=None ) -> str:
"""simple docstring"""
if is_numpy_array(lowerCamelCase__ ):
return np.transpose(lowerCamelCase__ , axes=lowerCamelCase__ )
elif is_torch_tensor(lowerCamelCase__ ):
return array.T if axes is None else array.permute(*lowerCamelCase__ )
elif is_tf_tensor(lowerCamelCase__ ):
import tensorflow as tf
return tf.transpose(lowerCamelCase__ , perm=lowerCamelCase__ )
elif is_jax_tensor(lowerCamelCase__ ):
return jnp.transpose(lowerCamelCase__ , axes=lowerCamelCase__ )
else:
raise ValueError(f"""Type not supported for transpose: {type(lowerCamelCase__ )}.""" )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
if is_numpy_array(lowerCamelCase__ ):
return np.reshape(lowerCamelCase__ , lowerCamelCase__ )
elif is_torch_tensor(lowerCamelCase__ ):
return array.reshape(*lowerCamelCase__ )
elif is_tf_tensor(lowerCamelCase__ ):
import tensorflow as tf
return tf.reshape(lowerCamelCase__ , lowerCamelCase__ )
elif is_jax_tensor(lowerCamelCase__ ):
return jnp.reshape(lowerCamelCase__ , lowerCamelCase__ )
else:
raise ValueError(f"""Type not supported for reshape: {type(lowerCamelCase__ )}.""" )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__=None ) -> Optional[int]:
"""simple docstring"""
if is_numpy_array(lowerCamelCase__ ):
return np.squeeze(lowerCamelCase__ , axis=lowerCamelCase__ )
elif is_torch_tensor(lowerCamelCase__ ):
return array.squeeze() if axis is None else array.squeeze(dim=lowerCamelCase__ )
elif is_tf_tensor(lowerCamelCase__ ):
import tensorflow as tf
return tf.squeeze(lowerCamelCase__ , axis=lowerCamelCase__ )
elif is_jax_tensor(lowerCamelCase__ ):
return jnp.squeeze(lowerCamelCase__ , axis=lowerCamelCase__ )
else:
raise ValueError(f"""Type not supported for squeeze: {type(lowerCamelCase__ )}.""" )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
if is_numpy_array(lowerCamelCase__ ):
return np.expand_dims(lowerCamelCase__ , lowerCamelCase__ )
elif is_torch_tensor(lowerCamelCase__ ):
return array.unsqueeze(dim=lowerCamelCase__ )
elif is_tf_tensor(lowerCamelCase__ ):
import tensorflow as tf
return tf.expand_dims(lowerCamelCase__ , axis=lowerCamelCase__ )
elif is_jax_tensor(lowerCamelCase__ ):
return jnp.expand_dims(lowerCamelCase__ , axis=lowerCamelCase__ )
else:
raise ValueError(f"""Type not supported for expand_dims: {type(lowerCamelCase__ )}.""" )
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
if is_numpy_array(lowerCamelCase__ ):
return np.size(lowerCamelCase__ )
elif is_torch_tensor(lowerCamelCase__ ):
return array.numel()
elif is_tf_tensor(lowerCamelCase__ ):
import tensorflow as tf
return tf.size(lowerCamelCase__ )
elif is_jax_tensor(lowerCamelCase__ ):
return array.size
else:
raise ValueError(f"""Type not supported for expand_dims: {type(lowerCamelCase__ )}.""" )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Any:
"""simple docstring"""
for key, value in auto_map.items():
if isinstance(lowerCamelCase__ , (tuple, list) ):
__UpperCAmelCase : List[str] = [f"""{repo_id}--{v}""" if (v is not None and "--" not in v) else v for v in value]
elif value is not None and "--" not in value:
__UpperCAmelCase : int = f"""{repo_id}--{value}"""
return auto_map
def _lowercase ( lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
for base_class in inspect.getmro(lowerCamelCase__ ):
__UpperCAmelCase : Tuple = base_class.__module__
__UpperCAmelCase : Union[str, Any] = base_class.__name__
if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("torch" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f"""Could not infer framework from class {model_class}.""" )
| 168 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =["pixel_values"]
def __init__( self , _A = True , _A = None , _A = PILImageResampling.BILINEAR , _A = True , _A = None , _A = True , _A = 1 / 255 , _A = True , _A = None , _A = None , **_A , ) -> None:
super().__init__(**_A )
SCREAMING_SNAKE_CASE_ = size if size is not None else {'''shortest_edge''': 256}
SCREAMING_SNAKE_CASE_ = get_size_dict(_A , default_to_square=_A )
SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
SCREAMING_SNAKE_CASE_ = get_size_dict(_A , param_name='''crop_size''' )
SCREAMING_SNAKE_CASE_ = do_resize
SCREAMING_SNAKE_CASE_ = size
SCREAMING_SNAKE_CASE_ = resample
SCREAMING_SNAKE_CASE_ = do_center_crop
SCREAMING_SNAKE_CASE_ = crop_size
SCREAMING_SNAKE_CASE_ = do_rescale
SCREAMING_SNAKE_CASE_ = rescale_factor
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _UpperCamelCase ( self , _A , _A , _A = PILImageResampling.BICUBIC , _A = None , **_A , ) -> np.ndarray:
SCREAMING_SNAKE_CASE_ = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE_ = get_resize_output_image_size(_A , size=size['''shortest_edge'''] , default_to_square=_A )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def _UpperCamelCase ( self , _A , _A , _A = None , **_A , ) -> np.ndarray:
SCREAMING_SNAKE_CASE_ = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}''' )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def _UpperCamelCase ( self , _A , _A , _A = None , **_A ) -> np.ndarray:
return rescale(_A , scale=_A , data_format=_A , **_A )
def _UpperCamelCase ( self , _A , _A , _A , _A = None , **_A , ) -> np.ndarray:
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def _UpperCamelCase ( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ = get_size_dict(_A , default_to_square=_A )
SCREAMING_SNAKE_CASE_ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ = get_size_dict(_A , param_name='''crop_size''' )
SCREAMING_SNAKE_CASE_ = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ = [to_numpy_array(_A ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ = [self.center_crop(image=_A , size=_A ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
SCREAMING_SNAKE_CASE_ = [to_channel_dimension_format(_A , _A ) for image in images]
SCREAMING_SNAKE_CASE_ = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
def _UpperCamelCase ( self , _A , _A = None ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_A ) != len(_A ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_A ):
SCREAMING_SNAKE_CASE_ = target_sizes.numpy()
SCREAMING_SNAKE_CASE_ = []
for idx in range(len(_A ) ):
SCREAMING_SNAKE_CASE_ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_A )
SCREAMING_SNAKE_CASE_ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_A )
else:
SCREAMING_SNAKE_CASE_ = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 597 |
import socket
def A__ ( ):
SCREAMING_SNAKE_CASE_ = socket.socket(socket.AF_INET, socket.SOCK_STREAM )
SCREAMING_SNAKE_CASE_ = socket.gethostname()
SCREAMING_SNAKE_CASE_ = 1_23_12
sock.connect((host, port) )
sock.send(B'''Hello server!''' )
with open('''Received_file''', '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
SCREAMING_SNAKE_CASE_ = sock.recv(10_24 )
if not data:
break
out_file.write(__lowerCamelCase )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main()
| 597 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=__A ):
_lowerCAmelCase : Union[str, Any] = ['torch', 'torchsde']
def __init__( self : Optional[int] , *lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : Any ):
"""simple docstring"""
requires_backends(self , ['''torch''', '''torchsde'''] )
@classmethod
def __lowercase ( cls : List[str] , *lowerCAmelCase__ : int , **lowerCAmelCase__ : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''torchsde'''] )
@classmethod
def __lowercase ( cls : Optional[Any] , *lowerCAmelCase__ : Dict , **lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''torchsde'''] )
| 527 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = ort.SessionOptions()
SCREAMING_SNAKE_CASE : Union[str, Any] = False
return options
def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE : int = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : Optional[Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE : Tuple = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=10 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE : List[Any] = output.images
SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
SCREAMING_SNAKE_CASE : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=a , safety_checker=a , feature_extractor=a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE : List[Any] = "A red cat sitting on a park bench"
SCREAMING_SNAKE_CASE : Dict = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe(
prompt=a , image=a , mask_image=a , guidance_scale=7.5 , num_inference_steps=20 , generator=a , output_type="np" , )
SCREAMING_SNAKE_CASE : List[str] = output.images
SCREAMING_SNAKE_CASE : Optional[int] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 | 25 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
A_ : Any = logging.get_logger(__name__)
A_ : List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
A_ : int = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
for attribute in key.split('.' ):
lowerCamelCase__ : Optional[int] = getattr(_lowercase , _lowercase )
if weight_type is not None:
lowerCamelCase__ : Optional[Any] = getattr(_lowercase , _lowercase ).shape
else:
lowerCamelCase__ : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowerCamelCase__ : Any = value
elif weight_type == "weight_g":
lowerCamelCase__ : List[Any] = value
elif weight_type == "weight_v":
lowerCamelCase__ : Optional[int] = value
elif weight_type == "bias":
lowerCamelCase__ : List[Any] = value
else:
lowerCamelCase__ : Union[str, Any] = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : Optional[int] = fairseq_model.state_dict()
lowerCamelCase__ : Optional[int] = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
lowerCamelCase__ : Tuple = None
for name, value in fairseq_dict.items():
lowerCamelCase__ : int = False
if "conv_layers" in name:
load_conv_layer(
_lowercase , _lowercase , _lowercase , _lowercase , hf_model.config.feat_extract_norm == 'group' , )
lowerCamelCase__ : Optional[Any] = True
elif name.split('.' )[0] == "proj":
lowerCamelCase__ : str = fairseq_model.proj
lowerCamelCase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
lowerCamelCase__ : Optional[Any] = True
if "*" in mapped_key:
lowerCamelCase__ : List[Any] = name.split(_lowercase )[0].split('.' )[-2]
lowerCamelCase__ : Optional[int] = mapped_key.replace('*' , _lowercase )
if "weight_g" in name:
lowerCamelCase__ : int = 'weight_g'
elif "weight_v" in name:
lowerCamelCase__ : Dict = 'weight_v'
elif "bias" in name:
lowerCamelCase__ : Dict = 'bias'
elif "weight" in name:
lowerCamelCase__ : str = 'weight'
else:
lowerCamelCase__ : List[Any] = None
set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
continue
if not is_used:
unused_weights.append(_lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
return proj_weight
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = full_name.split('conv_layers.' )[-1]
lowerCamelCase__ : Tuple = name.split('.' )
lowerCamelCase__ : Optional[int] = int(items[0] )
lowerCamelCase__ : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowerCamelCase__ : List[str] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowerCamelCase__ : Optional[Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowerCamelCase__ : Dict = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowerCamelCase__ : Union[str, Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowercase )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ : List[Any] = emb.weight.shape
lowerCamelCase__ : Any = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
lowerCamelCase__ : Union[str, Any] = emb.weight.data
return lin_layer
def lowerCamelCase_ ( _lowerCamelCase ):
with open(_lowercase , 'r' , encoding='utf-8' ) as f:
lowerCamelCase__ : Optional[int] = f.readlines()
lowerCamelCase__ : str = [line.split(' ' )[0] for line in lines]
lowerCamelCase__ : Tuple = len(_lowercase )
lowerCamelCase__ : Dict = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(_lowercase , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
lowerCamelCase__ : Optional[int] = WavaVecaConfig.from_pretrained(_lowercase )
lowerCamelCase__ : str = SpeechaTextaConfig.from_pretrained(
_lowercase , vocab_size=_lowercase , decoder_layers=_lowercase , do_stable_layer_norm=_lowercase )
lowerCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowercase , return_attention_mask=_lowercase , )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowerCamelCase__ : Any = model[0].eval()
# set weights for wav2vec2 encoder
lowerCamelCase__ : Optional[int] = WavaVecaModel(_lowercase )
lowerCamelCase__ : Optional[int] = recursively_load_weights_wavaveca(model.encoder , _lowercase )
lowerCamelCase__ : int = SpeechaTextaForCausalLM(_lowercase )
lowerCamelCase__ , lowerCamelCase__ : str = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowercase )
# set output linear layer
unexpected_keys.remove('embed_out' )
lowerCamelCase__ : Tuple = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
lowerCamelCase__ : Any = SpeechEncoderDecoderModel(encoder=_lowercase , decoder=_lowercase )
lowerCamelCase__ : str = False
# add projection layer
lowerCamelCase__ : str = nn.Parameter(projection_layer.weight )
lowerCamelCase__ : Dict = nn.Parameter(projection_layer.bias )
lowerCamelCase__ : Optional[Any] = create_vocab_dict(_lowercase )
with open(os.path.join(_lowercase , 'vocab.json' ) , 'w' ) as fp:
json.dump(_lowercase , _lowercase )
lowerCamelCase__ : Dict = SpeechaTextaTokenizer(os.path.join(_lowercase , 'vocab.json' ) )
tokenizer.save_pretrained(_lowercase )
lowerCamelCase__ : List[str] = hf_wavavec.config.to_dict()
lowerCamelCase__ : int = tokenizer.pad_token_id
lowerCamelCase__ : int = tokenizer.bos_token_id
lowerCamelCase__ : Dict = tokenizer.eos_token_id
lowerCamelCase__ : int = 'speech_to_text_2'
lowerCamelCase__ : List[Any] = 'wav2vec2'
lowerCamelCase__ : Dict = SpeechEncoderDecoderConfig.from_dict(_lowercase )
hf_wavavec.save_pretrained(_lowercase )
feature_extractor.save_pretrained(_lowercase )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_02_24, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
A_ : Optional[int] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 718 |
"""simple docstring"""
import os
def lowerCamelCase_ ( ):
with open(os.path.dirname(_lowerCamelCase ) + '/p022_names.txt' ) as file:
lowerCamelCase__ : Union[str, Any] = str(file.readlines()[0] )
lowerCamelCase__ : int = names.replace('"' , '' ).split(',' )
names.sort()
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : str = 0
for i, name in enumerate(_lowerCamelCase ):
for letter in name:
name_score += ord(_lowerCamelCase ) - 64
total_score += (i + 1) * name_score
lowerCamelCase__ : Dict = 0
return total_score
if __name__ == "__main__":
print(solution())
| 696 | 0 |
"""simple docstring"""
def _lowerCamelCase ( UpperCAmelCase_ : int = 10, UpperCAmelCase_ : int = 22 ) -> int:
"""simple docstring"""
A__ = range(1, UpperCAmelCase_ )
A__ = range(1, UpperCAmelCase_ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'{solution(10, 22) = }')
| 104 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ):
__a : List[str] = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
'decoder.output_projection.weight',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ):
__a , __a : Tuple = emb.weight.shape
__a : Union[str, Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
__a : List[str] = emb.weight.data
return lin_layer
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="facebook/mbart-large-en-ro" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False ):
__a : Tuple = torch.load(SCREAMING_SNAKE_CASE__ , map_location='cpu' )['model']
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = state_dict['encoder.embed_tokens.weight'].shape[0]
__a : Dict = MBartConfig.from_pretrained(SCREAMING_SNAKE_CASE__ , vocab_size=SCREAMING_SNAKE_CASE__ )
if mbart_aa and finetuned:
__a : str = 'relu'
__a : List[Any] = state_dict['decoder.embed_tokens.weight']
__a : Dict = MBartForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
model.model.load_state_dict(SCREAMING_SNAKE_CASE__ )
if finetuned:
__a : Tuple = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 597 | 0 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def a(lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
snake_case_ = 1.5
snake_case_ = int(factor * num_class_images )
snake_case_ = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=lowercase__ , aesthetic_weight=0.1 )
os.makedirs(f"""{class_data_dir}/images""" , exist_ok=lowercase__ )
if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
snake_case_ = client.query(text=lowercase__ )
if len(lowercase__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
snake_case_ = int(factor * num_images )
snake_case_ = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=lowercase__ , aesthetic_weight=0.1 , )
snake_case_ = 0
snake_case_ = 0
snake_case_ = tqdm(desc='downloading real regularization images' , total=lowercase__ )
with open(f"""{class_data_dir}/caption.txt""" , 'w' ) as fa, open(f"""{class_data_dir}/urls.txt""" , 'w' ) as fa, open(
f"""{class_data_dir}/images.txt""" , 'w' ) as fa:
while total < num_class_images:
snake_case_ = class_images[count]
count += 1
try:
snake_case_ = requests.get(images['url'] )
if img.status_code == 200:
snake_case_ = Image.open(BytesIO(img.content ) )
with open(f"""{class_data_dir}/images/{total}.jpg""" , 'wb' ) as f:
f.write(img.content )
fa.write(images['caption'] + '\n' )
fa.write(images['url'] + '\n' )
fa.write(f"""{class_data_dir}/images/{total}.jpg""" + '\n' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def a():
'''simple docstring'''
snake_case_ = argparse.ArgumentParser('' , add_help=lowercase__ )
parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=lowercase__ , type=lowercase__ )
parser.add_argument('--class_data_dir' , help='path to save images' , required=lowercase__ , type=lowercase__ )
parser.add_argument('--num_class_images' , help='number of images to download' , default=200 , type=lowercase__ )
return parser.parse_args()
if __name__ == "__main__":
A = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 46 |
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE ( metaclass=__snake_case ):
"""simple docstring"""
__A = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE ( metaclass=__snake_case ):
"""simple docstring"""
__A = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE ( metaclass=__snake_case ):
"""simple docstring"""
__A = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE ( metaclass=__snake_case ):
"""simple docstring"""
__A = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE ( metaclass=__snake_case ):
"""simple docstring"""
__A = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE ( metaclass=__snake_case ):
"""simple docstring"""
__A = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 46 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Optional[int] = logging.get_logger(__name__)
a : Tuple = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class a_ ( _UpperCAmelCase ):
a : List[str] = 'transfo-xl'
a : Tuple = ['mems']
a : List[Any] = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Tuple , __UpperCamelCase : Any=26_77_35 , __UpperCamelCase : Dict=[2_00_00, 4_00_00, 20_00_00] , __UpperCamelCase : Optional[int]=10_24 , __UpperCamelCase : Union[str, Any]=10_24 , __UpperCamelCase : Any=16 , __UpperCamelCase : Tuple=64 , __UpperCamelCase : List[Any]=40_96 , __UpperCamelCase : Union[str, Any]=4 , __UpperCamelCase : str=False , __UpperCamelCase : Any=18 , __UpperCamelCase : Union[str, Any]=16_00 , __UpperCamelCase : Dict=10_00 , __UpperCamelCase : Tuple=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Tuple=0 , __UpperCamelCase : List[str]=-1 , __UpperCamelCase : Dict=True , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : int="normal" , __UpperCamelCase : List[Any]=0.0_1 , __UpperCamelCase : Union[str, Any]=0.0_1 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Optional[Any]=1e-5 , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : Optional[Any] , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = vocab_size
_UpperCAmelCase = []
self.cutoffs.extend(__UpperCamelCase )
if proj_share_all_but_first:
_UpperCAmelCase = [False] + [True] * len(self.cutoffs )
else:
_UpperCAmelCase = [False] + [False] * len(self.cutoffs )
_UpperCAmelCase = d_model
_UpperCAmelCase = d_embed
_UpperCAmelCase = d_head
_UpperCAmelCase = d_inner
_UpperCAmelCase = div_val
_UpperCAmelCase = pre_lnorm
_UpperCAmelCase = n_layer
_UpperCAmelCase = n_head
_UpperCAmelCase = mem_len
_UpperCAmelCase = same_length
_UpperCAmelCase = attn_type
_UpperCAmelCase = clamp_len
_UpperCAmelCase = sample_softmax
_UpperCAmelCase = adaptive
_UpperCAmelCase = dropout
_UpperCAmelCase = dropatt
_UpperCAmelCase = untie_r
_UpperCAmelCase = init
_UpperCAmelCase = init_range
_UpperCAmelCase = proj_init_std
_UpperCAmelCase = init_std
_UpperCAmelCase = layer_norm_epsilon
super().__init__(eos_token_id=__UpperCamelCase , **__UpperCamelCase )
@property
def _snake_case ( self : Tuple ) ->List[str]:
'''simple docstring'''
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : int ) ->Union[str, Any]:
'''simple docstring'''
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) | 555 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class a_ ( unittest.TestCase ):
def _snake_case ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = [[1, 2, 4], [1, 2, 3, 4]]
_UpperCAmelCase = DisjunctiveConstraint(__UpperCamelCase )
self.assertTrue(isinstance(dc.token_ids , __UpperCamelCase ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(__UpperCamelCase ) # fails here
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = [[1, 2, 3], [1, 2, 4]]
_UpperCAmelCase = DisjunctiveConstraint(__UpperCamelCase )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(1 )
_UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(2 )
_UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(3 )
_UpperCAmelCase = stepped is True and completed is True and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def _snake_case ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
_UpperCAmelCase = DisjunctiveConstraint(__UpperCamelCase )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] ) | 555 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER', 'False' ) ) is not True, reason='Skipping test because should only be run when releasing minor transformers version', )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Any:
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(), encoding="utf-8", check=__A, )
assert hasattr(self, "env" )
def __UpperCAmelCase ( self, A_ ) -> Optional[int]:
UpperCAmelCase__ =f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
UpperCAmelCase__ ={"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=__A, instance_count=__A, instance_type=self.instance_type, debugger_hook_config=__A, hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=__A, py_version="py36", )
def __UpperCAmelCase ( self, A_ ) -> Optional[Any]:
TrainingJobAnalytics(__A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def __UpperCAmelCase ( self, A_ ) -> Any:
# create estimator
UpperCAmelCase__ =self.create_estimator(__A )
# run training
estimator.fit()
# result dataframe
UpperCAmelCase__ =TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase__ =list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
UpperCAmelCase__ =list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase__ =(
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds", 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""", "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, __A )
| 711 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class snake_case_ ( TensorFormatter[Mapping, 'torch.Tensor', Mapping] ):
'''simple docstring'''
def __init__( self, A_=None, **A_ ) -> Tuple:
super().__init__(features=A_ )
UpperCAmelCase__ =torch_tensor_kwargs
import torch # noqa import torch at initialization
def __UpperCAmelCase ( self, A_ ) -> Dict:
import torch
if isinstance(A_, A_ ) and column:
if all(
isinstance(A_, torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(A_ )
return column
def __UpperCAmelCase ( self, A_ ) -> Dict:
import torch
if isinstance(A_, (str, bytes, type(A_ )) ):
return value
elif isinstance(A_, (np.character, np.ndarray) ) and np.issubdtype(value.dtype, np.character ):
return value.tolist()
UpperCAmelCase__ ={}
if isinstance(A_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.integer ):
UpperCAmelCase__ ={"dtype": torch.intaa}
elif isinstance(A_, (np.number, np.ndarray) ) and np.issubdtype(value.dtype, np.floating ):
UpperCAmelCase__ ={"dtype": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(A_, PIL.Image.Image ):
UpperCAmelCase__ =np.asarray(A_ )
return torch.tensor(A_, **{**default_dtype, **self.torch_tensor_kwargs} )
def __UpperCAmelCase ( self, A_ ) -> int:
import torch
# support for torch, tf, jax etc.
if hasattr(A_, "__array__" ) and not isinstance(A_, torch.Tensor ):
UpperCAmelCase__ =data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(A_, np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(A_ ) for substruct in data_struct] )
elif isinstance(A_, (list, tuple) ):
return self._consolidate([self.recursive_tensorize(A_ ) for substruct in data_struct] )
return self._tensorize(A_ )
def __UpperCAmelCase ( self, A_ ) -> List[Any]:
return map_nested(self._recursive_tensorize, A_, map_list=A_ )
def __UpperCAmelCase ( self, A_ ) -> Mapping:
UpperCAmelCase__ =self.numpy_arrow_extractor().extract_row(A_ )
UpperCAmelCase__ =self.python_features_decoder.decode_row(A_ )
return self.recursive_tensorize(A_ )
def __UpperCAmelCase ( self, A_ ) -> "torch.Tensor":
UpperCAmelCase__ =self.numpy_arrow_extractor().extract_column(A_ )
UpperCAmelCase__ =self.python_features_decoder.decode_column(A_, pa_table.column_names[0] )
UpperCAmelCase__ =self.recursive_tensorize(A_ )
UpperCAmelCase__ =self._consolidate(A_ )
return column
def __UpperCAmelCase ( self, A_ ) -> Mapping:
UpperCAmelCase__ =self.numpy_arrow_extractor().extract_batch(A_ )
UpperCAmelCase__ =self.python_features_decoder.decode_batch(A_ )
UpperCAmelCase__ =self.recursive_tensorize(A_ )
for column_name in batch:
UpperCAmelCase__ =self._consolidate(batch[column_name] )
return batch
| 510 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = 42
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
a_ = True
@register_to_config
def __init__( self : Tuple , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , lowerCAmelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , lowerCAmelCase_ : Tuple[int] = (6_4,) , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : str = "silu" , lowerCAmelCase_ : int = 4 , lowerCAmelCase_ : int = 3_2 , lowerCAmelCase_ : int = 3_2 , lowerCAmelCase_ : float = 0.1_82_15 , ) -> Union[str, Any]:
super().__init__()
# pass init params to Encoder
__lowerCAmelCase = Encoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , down_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , double_z=lowerCAmelCase_ , )
# pass init params to Decoder
__lowerCAmelCase = Decoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , up_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , )
__lowerCAmelCase = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
__lowerCAmelCase = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
__lowerCAmelCase = False
__lowerCAmelCase = False
# only relevant if vae tiling is enabled
__lowerCAmelCase = self.config.sample_size
__lowerCAmelCase = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__lowerCAmelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__lowerCAmelCase = 0.25
def lowercase ( self : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any]=False ) -> int:
if isinstance(lowerCAmelCase_ , (Encoder, Decoder) ):
__lowerCAmelCase = value
def lowercase ( self : Dict , lowerCAmelCase_ : bool = True ) -> Optional[int]:
__lowerCAmelCase = use_tiling
def lowercase ( self : Optional[Any] ) -> Any:
self.enable_tiling(lowerCAmelCase_ )
def lowercase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase = True
def lowercase ( self : Dict ) -> Optional[int]:
__lowerCAmelCase = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowercase ( self : List[str] ) -> Dict[str, AttentionProcessor]:
__lowerCAmelCase = {}
def fn_recursive_add_processors(lowerCAmelCase_ : str , lowerCAmelCase_ : torch.nn.Module , lowerCAmelCase_ : Dict[str, AttentionProcessor] ):
if hasattr(lowerCAmelCase_ , 'set_processor' ):
__lowerCAmelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowerCAmelCase_ , lowerCAmelCase_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return processors
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> str:
__lowerCAmelCase = len(self.attn_processors.keys() )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(lowerCAmelCase_ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(lowerCAmelCase_ : str , lowerCAmelCase_ : torch.nn.Module , lowerCAmelCase_ : Optional[int] ):
if hasattr(lowerCAmelCase_ , 'set_processor' ):
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
module.set_processor(lowerCAmelCase_ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , lowerCAmelCase_ , lowerCAmelCase_ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Optional[Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
if self.use_slicing and x.shape[0] > 1:
__lowerCAmelCase = [self.encoder(lowerCAmelCase_ ) for x_slice in x.split(1 )]
__lowerCAmelCase = torch.cat(lowerCAmelCase_ )
else:
__lowerCAmelCase = self.encoder(lowerCAmelCase_ )
__lowerCAmelCase = self.quant_conv(lowerCAmelCase_ )
__lowerCAmelCase = DiagonalGaussianDistribution(lowerCAmelCase_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCAmelCase_ )
def lowercase ( self : str , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
__lowerCAmelCase = self.post_quant_conv(lowerCAmelCase_ )
__lowerCAmelCase = self.decoder(lowerCAmelCase_ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
@apply_forward_hook
def lowercase ( self : str , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
__lowerCAmelCase = [self._decode(lowerCAmelCase_ ).sample for z_slice in z.split(1 )]
__lowerCAmelCase = torch.cat(lowerCAmelCase_ )
else:
__lowerCAmelCase = self._decode(lowerCAmelCase_ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=lowerCAmelCase_ )
def lowercase ( self : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] ) -> Any:
__lowerCAmelCase = min(a.shape[2] , b.shape[2] , lowerCAmelCase_ )
for y in range(lowerCAmelCase_ ):
__lowerCAmelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def lowercase ( self : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple ) -> Tuple:
__lowerCAmelCase = min(a.shape[3] , b.shape[3] , lowerCAmelCase_ )
for x in range(lowerCAmelCase_ ):
__lowerCAmelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def lowercase ( self : int , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> AutoencoderKLOutput:
__lowerCAmelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__lowerCAmelCase = int(self.tile_latent_min_size * self.tile_overlap_factor )
__lowerCAmelCase = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__lowerCAmelCase = []
for i in range(0 , x.shape[2] , lowerCAmelCase_ ):
__lowerCAmelCase = []
for j in range(0 , x.shape[3] , lowerCAmelCase_ ):
__lowerCAmelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__lowerCAmelCase = self.encoder(lowerCAmelCase_ )
__lowerCAmelCase = self.quant_conv(lowerCAmelCase_ )
row.append(lowerCAmelCase_ )
rows.append(lowerCAmelCase_ )
__lowerCAmelCase = []
for i, row in enumerate(lowerCAmelCase_ ):
__lowerCAmelCase = []
for j, tile in enumerate(lowerCAmelCase_ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__lowerCAmelCase = self.blend_v(rows[i - 1][j] , lowerCAmelCase_ , lowerCAmelCase_ )
if j > 0:
__lowerCAmelCase = self.blend_h(row[j - 1] , lowerCAmelCase_ , lowerCAmelCase_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCAmelCase_ , dim=3 ) )
__lowerCAmelCase = torch.cat(lowerCAmelCase_ , dim=2 )
__lowerCAmelCase = DiagonalGaussianDistribution(lowerCAmelCase_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCAmelCase_ )
def lowercase ( self : List[Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
__lowerCAmelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__lowerCAmelCase = int(self.tile_sample_min_size * self.tile_overlap_factor )
__lowerCAmelCase = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__lowerCAmelCase = []
for i in range(0 , z.shape[2] , lowerCAmelCase_ ):
__lowerCAmelCase = []
for j in range(0 , z.shape[3] , lowerCAmelCase_ ):
__lowerCAmelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__lowerCAmelCase = self.post_quant_conv(lowerCAmelCase_ )
__lowerCAmelCase = self.decoder(lowerCAmelCase_ )
row.append(lowerCAmelCase_ )
rows.append(lowerCAmelCase_ )
__lowerCAmelCase = []
for i, row in enumerate(lowerCAmelCase_ ):
__lowerCAmelCase = []
for j, tile in enumerate(lowerCAmelCase_ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__lowerCAmelCase = self.blend_v(rows[i - 1][j] , lowerCAmelCase_ , lowerCAmelCase_ )
if j > 0:
__lowerCAmelCase = self.blend_h(row[j - 1] , lowerCAmelCase_ , lowerCAmelCase_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCAmelCase_ , dim=3 ) )
__lowerCAmelCase = torch.cat(lowerCAmelCase_ , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
def lowercase ( self : Tuple , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
__lowerCAmelCase = sample
__lowerCAmelCase = self.encode(lowerCAmelCase_ ).latent_dist
if sample_posterior:
__lowerCAmelCase = posterior.sample(generator=lowerCAmelCase_ )
else:
__lowerCAmelCase = posterior.mode()
__lowerCAmelCase = self.decode(lowerCAmelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
| 53 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Any = logging.get_logger(__name__)
def a_ ( lowerCAmelCase_ : str ):
__lowerCAmelCase = SwinConfig.from_pretrained(
'microsoft/swin-tiny-patch4-window7-224', out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
__lowerCAmelCase = MaskFormerConfig(backbone_config=lowerCAmelCase_ )
__lowerCAmelCase = 'huggingface/label-files'
if "ade20k-full" in model_name:
# this should be ok
__lowerCAmelCase = 847
__lowerCAmelCase = 'maskformer-ade20k-full-id2label.json'
elif "ade" in model_name:
# this should be ok
__lowerCAmelCase = 150
__lowerCAmelCase = 'ade20k-id2label.json'
elif "coco-stuff" in model_name:
# this should be ok
__lowerCAmelCase = 171
__lowerCAmelCase = 'maskformer-coco-stuff-id2label.json'
elif "coco" in model_name:
# TODO
__lowerCAmelCase = 133
__lowerCAmelCase = 'coco-panoptic-id2label.json'
elif "cityscapes" in model_name:
# this should be ok
__lowerCAmelCase = 19
__lowerCAmelCase = 'cityscapes-id2label.json'
elif "vistas" in model_name:
# this should be ok
__lowerCAmelCase = 65
__lowerCAmelCase = 'mapillary-vistas-id2label.json'
__lowerCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ), 'r' ) )
__lowerCAmelCase = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
return config
def a_ ( lowerCAmelCase_ : Tuple ):
__lowerCAmelCase = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : List[str], lowerCAmelCase_ : Tuple ):
__lowerCAmelCase = dct.pop(lowerCAmelCase_ )
__lowerCAmelCase = val
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : int ):
__lowerCAmelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowerCAmelCase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
__lowerCAmelCase = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[:dim, :]
__lowerCAmelCase = in_proj_bias[: dim]
__lowerCAmelCase = in_proj_weight[
dim : dim * 2, :
]
__lowerCAmelCase = in_proj_bias[
dim : dim * 2
]
__lowerCAmelCase = in_proj_weight[
-dim :, :
]
__lowerCAmelCase = in_proj_bias[-dim :]
# fmt: on
def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : Dict ):
# fmt: off
__lowerCAmelCase = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
__lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[: hidden_size, :]
__lowerCAmelCase = in_proj_bias[:config.hidden_size]
__lowerCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowerCAmelCase = in_proj_bias[hidden_size : hidden_size * 2]
__lowerCAmelCase = in_proj_weight[-hidden_size :, :]
__lowerCAmelCase = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
__lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[: hidden_size, :]
__lowerCAmelCase = in_proj_bias[:config.hidden_size]
__lowerCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowerCAmelCase = in_proj_bias[hidden_size : hidden_size * 2]
__lowerCAmelCase = in_proj_weight[-hidden_size :, :]
__lowerCAmelCase = in_proj_bias[-hidden_size :]
# fmt: on
def a_ ( ):
__lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCAmelCase = Image.open(requests.get(lowerCAmelCase_, stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : str, lowerCAmelCase_ : str, lowerCAmelCase_ : bool = False ):
__lowerCAmelCase = get_maskformer_config(lowerCAmelCase_ )
# load original state_dict
with open(lowerCAmelCase_, 'rb' ) as f:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = data['model']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
__lowerCAmelCase = create_rename_keys(lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
read_in_swin_q_k_v(lowerCAmelCase_, config.backbone_config )
read_in_decoder_q_k_v(lowerCAmelCase_, lowerCAmelCase_ )
# update to torch tensors
for key, value in state_dict.items():
__lowerCAmelCase = torch.from_numpy(lowerCAmelCase_ )
# load 🤗 model
__lowerCAmelCase = MaskFormerForInstanceSegmentation(lowerCAmelCase_ )
model.eval()
for name, param in model.named_parameters():
print(lowerCAmelCase_, param.shape )
__lowerCAmelCase , __lowerCAmelCase = model.load_state_dict(lowerCAmelCase_, strict=lowerCAmelCase_ )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowerCAmelCase_ ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
__lowerCAmelCase = prepare_img()
if "vistas" in model_name:
__lowerCAmelCase = 65
elif "cityscapes" in model_name:
__lowerCAmelCase = 6_5535
else:
__lowerCAmelCase = 255
__lowerCAmelCase = True if 'ade' in model_name else False
__lowerCAmelCase = MaskFormerImageProcessor(ignore_index=lowerCAmelCase_, reduce_labels=lowerCAmelCase_ )
__lowerCAmelCase = image_processor(lowerCAmelCase_, return_tensors='pt' )
__lowerCAmelCase = model(**lowerCAmelCase_ )
print('Logits:', outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
__lowerCAmelCase = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3], lowerCAmelCase_, atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
image_processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
print('Pushing model and image processor to the hub...' )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help=('Name of the MaskFormer model you\'d like to convert',),
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_snake_case : List[str] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 53 | 1 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = GPTaTokenizer
_a = GPTaTokenizerFast
_a = True
_a = {"""add_prefix_space""": True}
_a = False
def __A ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__magic_name__ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
__magic_name__ = dict(zip(A , range(len(A ) ) ) )
__magic_name__ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__magic_name__ = {'''unk_token''': '''<unk>'''}
__magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A ) )
def __A ( self , **A ) -> str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **A )
def __A ( self , **A ) -> int:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **A )
def __A ( self , A ) -> Any:
'''simple docstring'''
__magic_name__ = '''lower newer'''
__magic_name__ = '''lower newer'''
return input_text, output_text
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__magic_name__ = '''lower newer'''
__magic_name__ = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__magic_name__ = tokenizer.tokenize(A , add_prefix_space=A )
self.assertListEqual(A , A )
__magic_name__ = tokens + [tokenizer.unk_token]
__magic_name__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def __A ( self ) -> str:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_rust_tokenizer(add_prefix_space=A )
__magic_name__ = '''lower newer'''
# Testing tokenization
__magic_name__ = tokenizer.tokenize(A , add_prefix_space=A )
__magic_name__ = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
# Testing conversion to ids without special tokens
__magic_name__ = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
__magic_name__ = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
# Testing conversion to ids with special tokens
__magic_name__ = self.get_rust_tokenizer(add_prefix_space=A )
__magic_name__ = tokenizer.encode(A , add_prefix_space=A )
__magic_name__ = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
# Testing the unknown token
__magic_name__ = tokens + [rust_tokenizer.unk_token]
__magic_name__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A ) , A )
def __A ( self , *A , **A ) -> List[Any]:
'''simple docstring'''
pass
def __A ( self , A=15 ) -> Optional[int]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__magic_name__ = self.rust_tokenizer_class.from_pretrained(A , **A )
# Simple input
__magic_name__ = '''This is a simple input'''
__magic_name__ = ['''This is a simple input 1''', '''This is a simple input 2''']
__magic_name__ = ('''This is a simple input''', '''This is a pair''')
__magic_name__ = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='''max_length''' )
# Simple input
self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='''max_length''' )
# Simple input
self.assertRaises(
A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='''max_length''' , )
# Pair input
self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='''max_length''' )
# Pair input
self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='''max_length''' )
# Pair input
self.assertRaises(
A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='''max_length''' , )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
__magic_name__ = '''This is a simple input'''
__magic_name__ = ['''This is a simple input looooooooong''', '''This is a simple input''']
__magic_name__ = ('''This is a simple input''', '''This is a pair''')
__magic_name__ = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
__magic_name__ = tokenizer.pad_token_id
__magic_name__ = tokenizer(A , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
__magic_name__ = tokenizer(A , padding=A , truncate=A , return_tensors='''np''' )
__magic_name__ = tokenizer(*A , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
__magic_name__ = tokenizer(A , padding=A , truncate=A , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = '''$$$'''
__magic_name__ = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=A , add_bos_token=A )
__magic_name__ = '''This is a simple input'''
__magic_name__ = ['''This is a simple input 1''', '''This is a simple input 2''']
__magic_name__ = tokenizer.bos_token_id
__magic_name__ = tokenizer(A )
__magic_name__ = tokenizer(A )
self.assertEqual(out_s.input_ids[0] , A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__magic_name__ = tokenizer.decode(out_s.input_ids )
__magic_name__ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __A ( self ) -> List[str]:
'''simple docstring'''
pass
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = [self.get_tokenizer(do_lower_case=A , add_bos_token=A )]
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ = '''Encode this.'''
__magic_name__ = '''This one too please.'''
__magic_name__ = tokenizer.encode(A , add_special_tokens=A )
encoded_sequence += tokenizer.encode(A , add_special_tokens=A )
__magic_name__ = tokenizer.encode_plus(
A , A , add_special_tokens=A , return_special_tokens_mask=A , )
__magic_name__ = encoded_sequence_dict['''input_ids''']
__magic_name__ = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(A ) , len(A ) )
__magic_name__ = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(A )
]
__magic_name__ = [x for x in filtered_sequence if x is not None]
self.assertEqual(A , A )
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=A )
__magic_name__ = '''A photo of a cat'''
__magic_name__ = tokenizer.encode(
A , )
self.assertEqual(A , [2, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('''test_opt''' )
__magic_name__ = AutoTokenizer.from_pretrained('''./test_opt''' )
__magic_name__ = tokenizer.encode(
A , )
self.assertEqual(A , [2, 2_50, 13_45, 9, 10, 47_58] )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=A )
__magic_name__ = '''A photo of a cat'''
__magic_name__ = tokenizer.encode(
A , )
# Same as above
self.assertEqual(A , [2, 2_50, 13_45, 9, 10, 47_58] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=A )
__magic_name__ = '''bos'''
__magic_name__ = tokenizer.get_vocab()['''bos''']
__magic_name__ = '''A photo of a cat'''
__magic_name__ = tokenizer.encode(
A , )
# We changed the bos token
self.assertEqual(A , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('''./tok''' )
__magic_name__ = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
__magic_name__ = tokenizer.encode(
A , )
self.assertEqual(A , [3_19_57, 2_50, 13_45, 9, 10, 47_58] ) | 678 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ):
# Initialise PyTorch model
__magic_name__ = LxmertConfig.from_json_file(snake_case_ )
print(f'Building PyTorch model from configuration: {config}' )
__magic_name__ = LxmertForPreTraining(snake_case_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(snake_case_ , snake_case_ , snake_case_ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , snake_case_ )
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) | 678 | 1 |
'''simple docstring'''
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( UpperCamelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = '''mask2former'''
__UpperCAmelCase : int = ['''swin''']
__UpperCAmelCase : Dict = {'''hidden_size''': '''hidden_dim'''}
def __init__( self : List[str] ,_a : Optional[Dict] = None ,_a : int = 256 ,_a : int = 256 ,_a : int = 256 ,_a : int = 1024 ,_a : str = "relu" ,_a : int = 6 ,_a : int = 10 ,_a : int = 8 ,_a : float = 0.0 ,_a : int = 2048 ,_a : bool = False ,_a : bool = False ,_a : int = 4 ,_a : int = 255 ,_a : int = 100 ,_a : float = 0.1 ,_a : float = 2.0 ,_a : float = 5.0 ,_a : float = 5.0 ,_a : int = 1_2544 ,_a : float = 3.0 ,_a : float = 0.75 ,_a : float = 0.02 ,_a : float = 1.0 ,_a : bool = True ,_a : List[int] = [4, 8, 16, 32] ,_a : bool = None ,**_a : Tuple ,):
'''simple docstring'''
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
_a : Any = CONFIG_MAPPING['swin'](
image_size=224 ,in_channels=3 ,patch_size=4 ,embed_dim=96 ,depths=[2, 2, 18, 2] ,num_heads=[3, 6, 12, 24] ,window_size=7 ,drop_path_rate=0.3 ,use_absolute_embeddings=__A ,out_features=['stage1', 'stage2', 'stage3', 'stage4'] ,)
if isinstance(__A ,__A ):
_a : str = backbone_config.pop('model_type' )
_a : Tuple = CONFIG_MAPPING[backbone_model_type]
_a : Union[str, Any] = config_class.from_dict(__A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
F"""Supported model types: {','.join(self.backbones_supported )}""" )
_a : Optional[int] = backbone_config
_a : Optional[int] = feature_size
_a : Tuple = mask_feature_size
_a : Union[str, Any] = hidden_dim
_a : Tuple = encoder_feedforward_dim
_a : List[Any] = activation_function
_a : Any = encoder_layers
_a : List[str] = decoder_layers
_a : Dict = num_attention_heads
_a : Dict = dropout
_a : Optional[int] = dim_feedforward
_a : Dict = pre_norm
_a : List[Any] = enforce_input_projection
_a : List[str] = common_stride
_a : Tuple = ignore_value
_a : List[Any] = num_queries
_a : str = no_object_weight
_a : Union[str, Any] = class_weight
_a : Union[str, Any] = mask_weight
_a : List[str] = dice_weight
_a : Union[str, Any] = train_num_points
_a : Optional[Any] = oversample_ratio
_a : int = importance_sample_ratio
_a : List[Any] = init_std
_a : str = init_xavier_std
_a : Dict = use_auxiliary_loss
_a : Dict = feature_strides
_a : List[str] = output_auxiliary_logits
_a : List[str] = decoder_layers
super().__init__(**__A )
@classmethod
def __lowercase ( cls : str ,_a : PretrainedConfig ,**_a : int ):
'''simple docstring'''
return cls(
backbone_config=__A ,**__A ,)
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : int = copy.deepcopy(self.__dict__ )
_a : Union[str, Any] = self.backbone_config.to_dict()
_a : int = self.__class__.model_type
return output
| 229 |
from torch import nn
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'''Unsupported activation function: {act_fn}''' ) | 6 | 0 |
"""simple docstring"""
def _lowerCAmelCase ( __lowerCamelCase:list ):
'''simple docstring'''
__magic_name__ = len(__lowerCamelCase )
for _ in range(__lowerCamelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
__magic_name__ , __magic_name__ = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
lowercase = list(range(10, 0, -1))
print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 705 |
"""simple docstring"""
from copy import deepcopy
class A_ :
def __init__( self : List[str] , __lowerCamelCase : list[int] | None = None , __lowerCamelCase : int | None = None ) -> None:
if arr is None and size is not None:
__magic_name__ = size
__magic_name__ = [0] * size
elif arr is not None:
self.init(__lowerCamelCase )
else:
raise ValueError("Either arr or size must be specified" )
def _snake_case ( self : Optional[int] , __lowerCamelCase : list[int] ) -> None:
__magic_name__ = len(__lowerCamelCase )
__magic_name__ = deepcopy(__lowerCamelCase )
for i in range(1 , self.size ):
__magic_name__ = self.next_(__lowerCamelCase )
if j < self.size:
self.tree[j] += self.tree[i]
def _snake_case ( self : Any ) -> list[int]:
__magic_name__ = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
__magic_name__ = self.next_(__lowerCamelCase )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def _snake_case ( __lowerCamelCase : int ) -> int:
return index + (index & (-index))
@staticmethod
def _snake_case ( __lowerCamelCase : int ) -> int:
return index - (index & (-index))
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> None:
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
__magic_name__ = self.next_(__lowerCamelCase )
def _snake_case ( self : Any , __lowerCamelCase : int , __lowerCamelCase : int ) -> None:
self.add(__lowerCamelCase , value - self.get(__lowerCamelCase ) )
def _snake_case ( self : List[Any] , __lowerCamelCase : int ) -> int:
if right == 0:
return 0
__magic_name__ = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
__magic_name__ = self.prev(__lowerCamelCase )
return result
def _snake_case ( self : Any , __lowerCamelCase : int , __lowerCamelCase : int ) -> int:
return self.prefix(__lowerCamelCase ) - self.prefix(__lowerCamelCase )
def _snake_case ( self : Tuple , __lowerCamelCase : int ) -> int:
return self.query(__lowerCamelCase , index + 1 )
def _snake_case ( self : Tuple , __lowerCamelCase : int ) -> int:
value -= self.tree[0]
if value < 0:
return -1
__magic_name__ = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
__magic_name__ = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 468 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : List[Any] = logging.get_logger(__name__)
a__ : Tuple = {
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase ="canine"
def __init__( self : str , a__ : int=768 , a__ : Optional[int]=12 , a__ : Optional[Any]=12 , a__ : int=3072 , a__ : str="gelu" , a__ : int=0.1 , a__ : List[Any]=0.1 , a__ : Optional[int]=16384 , a__ : Optional[int]=16 , a__ : Optional[Any]=0.02 , a__ : Dict=1e-1_2 , a__ : Union[str, Any]=0 , a__ : Optional[int]=0xe_0_0_0 , a__ : Any=0xe_0_0_1 , a__ : Union[str, Any]=4 , a__ : Dict=4 , a__ : int=8 , a__ : str=16384 , a__ : Dict=128 , **a__ : List[str] , ):
super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ )
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = type_vocab_size
UpperCAmelCase = layer_norm_eps
# Character config:
UpperCAmelCase = downsampling_rate
UpperCAmelCase = upsampling_kernel_size
UpperCAmelCase = num_hash_functions
UpperCAmelCase = num_hash_buckets
UpperCAmelCase = local_transformer_stride
| 51 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int ):
'''simple docstring'''
return (-y * np.log(__UpperCamelCase ) - (1 - y) * np.log(1 - h )).mean()
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[int] = np.dot(__UpperCamelCase , __UpperCamelCase )
return np.sum(y * scores - np.log(1 + np.exp(__UpperCamelCase ) ) )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int=7_0_0_0_0 ):
'''simple docstring'''
snake_case_ : Dict = np.zeros(x.shape[1] )
for iterations in range(__UpperCamelCase ):
snake_case_ : Any = np.dot(__UpperCamelCase , __UpperCamelCase )
snake_case_ : List[str] = sigmoid_function(__UpperCamelCase )
snake_case_ : Optional[Any] = np.dot(x.T , h - y ) / y.size
snake_case_ : str = theta - alpha * gradient # updating the weights
snake_case_ : int = np.dot(__UpperCamelCase , __UpperCamelCase )
snake_case_ : List[str] = sigmoid_function(__UpperCamelCase )
snake_case_ : Dict = cost_function(__UpperCamelCase , __UpperCamelCase )
if iterations % 1_0_0 == 0:
print(F'loss: {j} \t' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
__lowerCAmelCase : Any = datasets.load_iris()
__lowerCAmelCase : List[Any] = iris.data[:, :2]
__lowerCAmelCase : Tuple = (iris.target != 0) * 1
__lowerCAmelCase : Any = 0.1
__lowerCAmelCase : List[Any] = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
return sigmoid_function(
np.dot(__UpperCamelCase , __UpperCamelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((__lowerCAmelCase) , (__lowerCAmelCase)) : Union[str, Any] = (x[:, 0].min(), x[:, 0].max())
((__lowerCAmelCase) , (__lowerCAmelCase)) : Tuple = (x[:, 1].min(), x[:, 1].max())
((__lowerCAmelCase) , (__lowerCAmelCase)) : Optional[Any] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
__lowerCAmelCase : Any = np.c_[xxa.ravel(), xxa.ravel()]
__lowerCAmelCase : Optional[int] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 58 | 0 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__lowerCAmelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def _UpperCAmelCase ( __A : str , __A : str ):
if "xprophetnet" in prophetnet_checkpoint_path:
a_ : Tuple = XLMProphetNetForConditionalGenerationOld.from_pretrained(__A )
a_ , a_ : Optional[Any] = XLMProphetNetForConditionalGeneration.from_pretrained(
__A , output_loading_info=__A )
else:
a_ : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(__A )
a_ , a_ : Any = ProphetNetForConditionalGeneration.from_pretrained(
__A , output_loading_info=__A )
a_ : str = ['''key_proj''', '''value_proj''', '''query_proj''']
a_ : Tuple = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
a_ : List[str] = key.split('''.''' )
if attributes[0] == "lm_head":
a_ : List[str] = prophet
a_ : Dict = prophet_old
else:
a_ : str = prophet.prophetnet
a_ : int = prophet_old.model
a_ : str = False
for attribute in attributes:
if attribute in mapping:
a_ : Dict = mapping[attribute]
if not hasattr(__A , __A ) and len(__A ) > 0:
a_ : List[str] = attribute
elif hasattr(__A , __A ):
a_ : Union[str, Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
a_ : Tuple = old_model.weight
logger.info(f'{attribute} is initialized.' )
a_ : Union[str, Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
a_ : Union[str, Any] = old_model.bias
logger.info(f'{attribute} is initialized' )
a_ : Dict = True
break
elif attribute in special_keys and hasattr(__A , '''in_proj_weight''' ):
a_ : Tuple = old_model.in_proj_weight.shape[0] // 3
a_ : Any = getattr(__A , __A )
param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
a_ : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
a_ : Optional[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
a_ : List[Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
a_ : Optional[int] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
a_ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
a_ : Any = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
a_ : Dict = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
a_ : Union[str, Any] = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
a_ : Optional[Any] = True
break
if attribute.isdigit():
a_ : Union[str, Any] = model[int(__A )]
a_ : str = old_model[int(__A )]
else:
a_ : Tuple = getattr(__A , __A )
if old_attribute == "":
a_ : List[str] = old_model
else:
if not hasattr(__A , __A ):
raise ValueError(f'{old_model} does not have {old_attribute}' )
a_ : Optional[Any] = getattr(__A , __A )
if not is_key_init:
raise ValueError(f'{key} was not correctly initialized!' )
print(f'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(__A )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 666 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def _UpperCAmelCase ( __A : str , __A : dict ):
a_ : Tuple = BeautifulSoup(requests.get(__A , params=__A ).content , '''html.parser''' )
a_ : List[str] = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
a_ : List[str] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
if __name__ == "__main__":
__lowerCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 666 | 1 |
from collections.abc import Callable
class UpperCAmelCase__ :
def __init__( self ,A__ = None ):
_A : Tuple = []
# Stores indexes of each item for supporting updates and deletion.
_A : Optional[int] = {}
# Stores current size of heap.
_A : str = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
_A : Any = key or (lambda A__ : x)
def A__ ( self ,A__ ):
return int((i - 1) / 2 ) if i > 0 else None
def A__ ( self ,A__ ):
_A : str = int(2 * i + 1 )
return left if 0 < left < self.size else None
def A__ ( self ,A__ ):
_A : str = int(2 * i + 2 )
return right if 0 < right < self.size else None
def A__ ( self ,A__ ,A__ ):
_A , _A : Union[str, Any] = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_A , _A : Optional[Any] = self.arr[j], self.arr[i]
def A__ ( self ,A__ ,A__ ):
return self.arr[i][1] < self.arr[j][1]
def A__ ( self ,A__ ):
_A : int = self._left(_a )
_A : Optional[int] = self._right(_a )
_A : int = i
if left is not None and not self._cmp(_a ,_a ):
_A : Optional[int] = left
if right is not None and not self._cmp(_a ,_a ):
_A : Union[str, Any] = right
return valid_parent
def A__ ( self ,A__ ):
_A : Dict = self._parent(_a )
while parent is not None and not self._cmp(_a ,_a ):
self._swap(_a ,_a )
_A , _A : Optional[int] = parent, self._parent(_a )
def A__ ( self ,A__ ):
_A : Optional[Any] = self._get_valid_parent(_a )
while valid_parent != index:
self._swap(_a ,_a )
_A , _A : List[str] = valid_parent, self._get_valid_parent(_a )
def A__ ( self ,A__ ,A__ ):
if item not in self.pos_map:
return
_A : Union[str, Any] = self.pos_map[item]
_A : Dict = [item, self.key(_a )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_a )
self._heapify_down(_a )
def A__ ( self ,A__ ):
if item not in self.pos_map:
return
_A : List[str] = self.pos_map[item]
del self.pos_map[item]
_A : Optional[Any] = self.arr[self.size - 1]
_A : List[str] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(_a )
self._heapify_down(_a )
def A__ ( self ,A__ ,A__ ):
_A : str = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_a )] )
else:
_A : Optional[int] = [item, self.key(_a )]
_A : Union[str, Any] = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def A__ ( self ):
return self.arr[0] if self.size else None
def A__ ( self ):
_A : Tuple = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def a__ () -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 206 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class A__ ( unittest.TestCase ):
def A ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =BlipImageProcessor()
_SCREAMING_SNAKE_CASE =GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
_SCREAMING_SNAKE_CASE =BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
_SCREAMING_SNAKE_CASE =InstructBlipProcessor(_a , _a , _a )
processor.save_pretrained(self.tmpdirname )
def A ( self : List[str] , **_a : List[Any] ) -> List[str]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).tokenizer
def A ( self : Dict , **_a : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).image_processor
def A ( self : List[str] , **_a : Dict ) -> List[str]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).qformer_tokenizer
def A ( self : Optional[int] ) -> int:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A ( self : int ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A ( self : str ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
self.assertIsInstance(processor.qformer_tokenizer , _a )
def A ( self : int ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_qformer_tokenizer()
_SCREAMING_SNAKE_CASE =InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='np' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A ( self : int ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_qformer_tokenizer()
_SCREAMING_SNAKE_CASE =InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_SCREAMING_SNAKE_CASE ='lower newer'
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a , return_token_type_ids=_a )
_SCREAMING_SNAKE_CASE =qformer_tokenizer(_a , return_token_type_ids=_a )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def A ( self : int ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_qformer_tokenizer()
_SCREAMING_SNAKE_CASE =InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_SCREAMING_SNAKE_CASE ='lower newer'
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def A ( self : Optional[Any] ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_qformer_tokenizer()
_SCREAMING_SNAKE_CASE =InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def A ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_qformer_tokenizer()
_SCREAMING_SNAKE_CASE =InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
_SCREAMING_SNAKE_CASE ='lower newer'
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
| 405 | 0 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def snake_case_ ( lowerCAmelCase_ : bool = True , *lowerCAmelCase_ : int , **lowerCAmelCase_ : List[str] ):
if not is_tqdm_available():
raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
__lowercase : List[str] = False
if main_process_only:
__lowercase : Optional[int] = PartialState().local_process_index == 0
return _tqdm(*lowerCAmelCase_ , **lowerCAmelCase_ , disable=lowerCAmelCase_ ) | 649 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowerCamelCase : Union[str, Any] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , __a : List[str] , __a : Optional[int]=16 , __a : Optional[Any]=13 , __a : str=7 , __a : List[str]=14 , __a : Any=10 , __a : str=19 , __a : int=5 , __a : Any=4 , __a : List[Any]=True , __a : Tuple=16 , __a : Dict=2 , __a : Tuple=4 , __a : int=4 , __a : List[Any]="gelu" , __a : Tuple=0.1 , __a : List[str]=0.1 , __a : int=[1, 2, 3, 4, 5] , __a : str=25 , __a : Any=5 , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = d_model
__lowercase : Dict = parent
__lowercase : Tuple = batch_size
__lowercase : Optional[int] = prediction_length
__lowercase : List[str] = context_length
__lowercase : Any = cardinality
__lowercase : str = num_time_features
__lowercase : Optional[int] = lags_sequence
__lowercase : Optional[Any] = embedding_dimension
__lowercase : List[Any] = is_training
__lowercase : List[str] = hidden_size
__lowercase : int = num_hidden_layers
__lowercase : Any = num_attention_heads
__lowercase : List[Any] = intermediate_size
__lowercase : int = hidden_act
__lowercase : str = hidden_dropout_prob
__lowercase : List[Any] = attention_probs_dropout_prob
__lowercase : str = context_length
__lowercase : int = prediction_length + label_length
__lowercase : Union[str, Any] = label_length
__lowercase : Optional[int] = moving_average
__lowercase : Optional[Any] = autocorrelation_factor
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowerCAmelCase ( self : Tuple , __a : str ) -> int:
"""simple docstring"""
__lowercase : Any = config.context_length + max(config.lags_sequence )
__lowercase : Any = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__lowercase : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__lowercase : List[str] = floats_tensor([self.batch_size, _past_length] )
__lowercase : List[str] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__lowercase : Dict = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__lowercase : str = floats_tensor([self.batch_size, config.prediction_length] )
__lowercase : List[str] = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_config()
__lowercase : Any = self.prepare_autoformer_inputs_dict(__a )
return config, inputs_dict
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase ( self : Optional[Any] , __a : Tuple , __a : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel(config=__a ).to(__a ).eval()
__lowercase : Optional[int] = model(**__a )
__lowercase : Dict = outputs.encoder_last_hidden_state
__lowercase : Tuple = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : List[str] = model.get_encoder()
encoder.save_pretrained(__a )
__lowercase : List[str] = AutoformerEncoder.from_pretrained(__a ).to(__a )
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase : Any = model.create_network_inputs(**__a )
__lowercase , __lowercase : Any = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__lowercase : Optional[Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__lowercase : Union[str, Any] = encoder(inputs_embeds=__a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
__lowercase : str = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__lowercase : Optional[int] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__lowercase : Any = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__lowercase : Dict = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : Optional[Any] = model.get_decoder()
decoder.save_pretrained(__a )
__lowercase : Tuple = AutoformerDecoder.from_pretrained(__a ).to(__a )
__lowercase : str = decoder(
trend=__a , inputs_embeds=__a , encoder_hidden_states=__a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : List[str] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_A : List[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
_A : Any = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_A : Dict = False
_A : Tuple = False
_A : Optional[int] = False
_A : Tuple = False
_A : str = False
_A : Union[str, Any] = False
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
__lowercase : List[str] = AutoformerModelTester(self )
__lowercase : Dict = ConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
__lowercase , __lowercase : Tuple = model_class.from_pretrained(__a , output_loading_info=__a )
self.assertEqual(info["""missing_keys"""] , [] )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__a )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : Any = inspect.signature(getattr(__a , """forward""" ) )
# The main input is the name of the argument after `self`
__lowercase : Optional[int] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __a )
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
__lowercase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : Any = [*signature.parameters.keys()]
__lowercase : int = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(__a )] , __a )
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
__lowercase , __lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : int = True
__lowercase : Tuple = getattr(self.model_tester , """seq_length""" , __a )
__lowercase : Union[str, Any] = getattr(self.model_tester , """decoder_seq_length""" , __a )
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """d_model""" , __a )
__lowercase : Optional[int] = getattr(self.model_tester , """num_attention_heads""" , __a )
__lowercase : Any = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowercase : Dict = True
__lowercase : List[str] = False
__lowercase : Optional[int] = True
__lowercase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : int = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase : Optional[int] = True
__lowercase : List[str] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Dict = outputs.encoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowercase : Tuple = len(__a )
__lowercase : str = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__a , __a )
# decoder attentions
__lowercase : List[Any] = outputs.decoder_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowercase : Optional[int] = outputs.cross_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowercase : Tuple = True
__lowercase : Union[str, Any] = True
__lowercase : Tuple = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Any = model(**self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + 2 , len(__a ) )
__lowercase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def snake_case_ ( lowerCAmelCase_ : Optional[int]="train-batch.pt" ):
__lowercase : Dict = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=lowerCAmelCase_ , repo_type="""dataset""" )
__lowercase : Optional[int] = torch.load(lowerCAmelCase_ , map_location=lowerCAmelCase_ )
return batch
@require_torch
@slow
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[Any] = prepare_batch()
with torch.no_grad():
__lowercase : Tuple = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowercase : List[str] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : int = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[str] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : Optional[Any] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowercase : List[str] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : Optional[int] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : int = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowercase : int = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __a )
__lowercase : Optional[Any] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=__a )
__lowercase : Dict = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __a , rtol=1E-1 ) ) | 649 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Union[str, Any] = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Tuple = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
__A : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 343 | from pathlib import Path
import numpy as np
from PIL import Image
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> np.ndarray:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> np.ndarray:
return (gray > 1_2_7) & (gray <= 2_5_5)
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> np.ndarray:
lowerCAmelCase = np.zeros_like(snake_case__ )
lowerCAmelCase = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
lowerCAmelCase = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
lowerCAmelCase = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
lowerCAmelCase = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
lowercase__ : Dict = Path(__file__).resolve().parent / '''image_data''' / '''lena.jpg'''
lowercase__ : Dict = np.array(Image.open(lena_path))
# kernel to be applied
lowercase__ : int = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
lowercase__ : Dict = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
lowercase__ : Dict = Image.fromarray(output).convert('''RGB''')
pil_img.save('''result_dilation.png''')
| 312 | 0 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_UpperCAmelCase = logging.getLogger(__name__)
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''summarization'''
lowerCamelCase_ = ['''loss''']
lowerCamelCase_ = ROUGE_KEYS
lowerCamelCase_ = '''rouge2'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
A_ : str = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
super().__init__(lowercase , num_labels=lowercase , mode=self.mode , **lowercase )
use_task_specific_params(self.model , 'summarization' )
save_git_info(self.hparams.output_dir )
A_ : List[str] = Path(self.output_dir ) / 'metrics.json'
A_ : List[str] = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
A_ : str = 0
A_ : Any = defaultdict(lowercase )
A_ : Union[str, Any] = self.config.model_type
A_ : int = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
A_ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
A_ : Optional[Any] = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
A_ : List[str] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
A_ : Tuple = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
A_ : int = get_git_info()['repo_sha']
A_ : int = hparams.num_workers
A_ : Union[str, Any] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase ):
A_ : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
A_ : Any = self.decoder_start_token_id
A_ : str = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
)
A_ : Union[str, Any] = False
A_ : Tuple = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
A_ : int = self.hparams.eval_max_gen_length
else:
A_ : List[Any] = self.model.config.max_length
A_ : List[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(lowercase , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
A_ : int = True
return readable_batch
def lowerCAmelCase_ ( self , lowercase , **lowercase ):
"""simple docstring"""
return self.model(lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[Any] = self.tokenizer.batch_decode(
lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
return lmap(str.strip , lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = self.tokenizer.pad_token_id
A_ , A_ : List[str] = batch['input_ids'], batch['attention_mask']
A_ : str = batch['labels']
if isinstance(self.model , lowercase ):
A_ : Optional[int] = self.model._shift_right(lowercase )
else:
A_ : Any = shift_tokens_right(lowercase , lowercase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
A_ : Optional[Any] = decoder_input_ids
self.save_readable_batch(lowercase )
A_ : List[str] = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase )
A_ : Dict = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
A_ : Union[str, Any] = nn.CrossEntropyLoss(ignore_index=lowercase )
assert lm_logits.shape[-1] == self.vocab_size
A_ : Any = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
A_ : List[Any] = nn.functional.log_softmax(lowercase , dim=-1 )
A_ , A_ : Any = label_smoothed_nll_loss(
lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase )
return (loss,)
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.tokenizer.pad_token_id
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
A_ : str = self._step(lowercase )
A_ : Optional[int] = dict(zip(self.loss_names , lowercase ) )
# tokens per batch
A_ : int = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
A_ : str = batch['input_ids'].shape[0]
A_ : Any = batch['input_ids'].eq(self.pad ).sum()
A_ : Optional[int] = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return self._generative_step(lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase="val" ):
"""simple docstring"""
self.step_count += 1
A_ : Union[str, Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
A_ : Dict = losses['loss']
A_ : int = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
A_ : Any = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
A_ : torch.FloatTensor = torch.tensor(lowercase ).type_as(lowercase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowercase )
A_ : Tuple = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
A_ : Tuple = self.step_count
self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path
A_ : Dict = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return calculate_rouge(lowercase , lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Dict = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
A_ : Optional[int] = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
A_ : int = (time.time() - ta) / batch['input_ids'].shape[0]
A_ : List[str] = self.ids_to_clean_text(lowercase )
A_ : List[str] = self.ids_to_clean_text(batch['labels'] )
A_ : List[Any] = self._step(lowercase )
A_ : int = dict(zip(self.loss_names , lowercase ) )
A_ : Dict = self.calc_generative_metrics(lowercase , lowercase )
A_ : List[Any] = np.mean(lmap(lowercase , lowercase ) )
base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase )
return base_metrics
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return self._generative_step(lowercase )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
return self.validation_epoch_end(lowercase , prefix='test' )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = self.n_obs[type_path]
A_ : List[Any] = self.target_lens[type_path]
A_ : str = self.dataset_class(
self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , )
return dataset
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase = False ):
"""simple docstring"""
A_ : Optional[int] = self.get_dataset(lowercase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
A_ : str = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
A_ : str = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=lowercase )
return dataloader
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(lowercase , lowercase )
add_generic_args(lowercase , lowercase )
parser.add_argument(
'--max_source_length' , default=1_0_2_4 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--max_target_length' , default=5_6 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--val_max_target_length' , default=1_4_2 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--test_max_target_length' , default=1_4_2 , type=lowercase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=lowercase )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=lowercase )
parser.add_argument('--max_tokens_per_batch' , type=lowercase , default=lowercase )
parser.add_argument('--logger_name' , type=lowercase , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=lowercase , default=5_0_0 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument(
'--task' , type=lowercase , default='summarization' , required=lowercase , help='# examples. -1 means use all.' )
parser.add_argument('--label_smoothing' , type=lowercase , default=0.0 , required=lowercase )
parser.add_argument('--src_lang' , type=lowercase , default='' , required=lowercase )
parser.add_argument('--tgt_lang' , type=lowercase , default='' , required=lowercase )
parser.add_argument('--eval_beams' , type=lowercase , default=lowercase , required=lowercase )
parser.add_argument(
'--val_metric' , type=lowercase , default=lowercase , required=lowercase , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=lowercase , default=lowercase , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=lowercase , default=1 , required=lowercase , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=lowercase , default=-1 , required=lowercase , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
' val_check_interval will effect it.'
) , )
return parser
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''translation'''
lowerCamelCase_ = ['''loss''']
lowerCamelCase_ = ['''bleu''']
lowerCamelCase_ = '''bleu'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , **lowercase )
A_ : List[Any] = hparams.src_lang
A_ : str = hparams.tgt_lang
def lowerCAmelCase_ ( self , lowercase , lowercase ):
"""simple docstring"""
return calculate_bleu(lowercase , lowercase )
def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : Tuple=None ):
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=__lowercase )
check_output_dir(__lowercase ,expected_items=3 )
if model is None:
if "summarization" in args.task:
A_ : SummarizationModule = SummarizationModule(__lowercase )
else:
A_ : SummarizationModule = TranslationModule(__lowercase )
A_ : Optional[int] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('/tmp' )
or str(args.output_dir ).startswith('/var' )
):
A_ : List[str] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
A_ : List[str] = os.environ.get('WANDB_PROJECT' ,__lowercase )
A_ : List[Any] = WandbLogger(name=model.output_dir.name ,project=__lowercase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
A_ : str = WandbLogger(name=model.output_dir.name ,project=f'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
A_ : Dict = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience )
else:
A_ : str = False
A_ : Dict = args.val_metric == 'loss'
A_ : pl.Trainer = generic_train(
__lowercase ,__lowercase ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback(
args.output_dir ,model.val_metric ,args.save_top_k ,__lowercase ) ,early_stopping_callback=__lowercase ,logger=__lowercase ,)
pickle_save(model.hparams ,model.output_dir / 'hparams.pkl' )
if not args.do_predict:
return model
A_ : Optional[Any] = ''
A_ : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir ,'*.ckpt' ) ,recursive=__lowercase ) )
if checkpoints:
A_ : List[Any] = checkpoints[-1]
A_ : Any = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
_UpperCAmelCase = pl.Trainer.add_argparse_args(parser)
_UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
_UpperCAmelCase = parser.parse_args()
main(args)
| 70 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def UpperCamelCase ( ):
'''simple docstring'''
A_ : List[Any] = ArgumentParser('Accelerate CLI tool' ,usage='accelerate <command> [<args>]' ,allow_abbrev=__lowercase )
A_ : Any = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=__lowercase )
env_command_parser(subparsers=__lowercase )
launch_command_parser(subparsers=__lowercase )
tpu_command_parser(subparsers=__lowercase )
test_command_parser(subparsers=__lowercase )
# Let's go
A_ : Optional[Any] = parser.parse_args()
if not hasattr(__lowercase ,'func' ):
parser.print_help()
exit(1 )
# Run
args.func(__lowercase )
if __name__ == "__main__":
main()
| 70 | 1 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class SCREAMING_SNAKE_CASE :
def __init__( self : Any , __lowercase : List[str] , __lowercase : Optional[Any]=13 , __lowercase : List[str]=7 , __lowercase : Union[str, Any]=6 , __lowercase : Any=17 , __lowercase : Dict=23 , __lowercase : Tuple=11 , __lowercase : Union[str, Any]=True , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = seq_length
__a = act_dim
__a = state_dim
__a = hidden_size
__a = max_length
__a = is_training
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
__a = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
__a = floats_tensor((self.batch_size, self.seq_length, 1) )
__a = floats_tensor((self.batch_size, self.seq_length, 1) )
__a = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
__a = random_attention_mask((self.batch_size, self.seq_length) )
__a = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def UpperCamelCase_ ( self : List[str] , __lowercase : int , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[Any] , __lowercase : str , __lowercase : Union[str, Any] , __lowercase : Optional[int] , ):
'''simple docstring'''
__a = DecisionTransformerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = config_and_inputs
__a = {
"""states""": states,
"""actions""": actions,
"""rewards""": rewards,
"""returns_to_go""": returns_to_go,
"""timesteps""": timesteps,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Optional[int] =(DecisionTransformerModel,) if is_torch_available() else ()
__lowerCamelCase : List[str] =()
__lowerCamelCase : Optional[Any] ={'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
__lowerCamelCase : Optional[int] =False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
__lowerCamelCase : Optional[Any] =False
__lowerCamelCase : Optional[Any] =False
__lowerCamelCase : List[Any] =False
__lowerCamelCase : Any =False
__lowerCamelCase : Any =False
__lowerCamelCase : str =False
__lowerCamelCase : Tuple =False
__lowerCamelCase : int =False
__lowerCamelCase : int =False
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = DecisionTransformerModelTester(self )
__a = ConfigTester(self , config_class=__lowercase , hidden_size=37 )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = DecisionTransformerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__lowercase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = [
"""states""",
"""actions""",
"""rewards""",
"""returns_to_go""",
"""timesteps""",
"""attention_mask""",
]
self.assertListEqual(arg_names[: len(__lowercase )] , __lowercase )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = 2 # number of steps of autoregressive prediction we will perform
__a = 10 # defined by the RL environment, may be normalized
__a = DecisionTransformerModel.from_pretrained("""edbeeching/decision-transformer-gym-hopper-expert""" )
__a = model.to(__lowercase )
__a = model.config
torch.manual_seed(0 )
__a = torch.randn(1 , 1 , config.state_dim ).to(device=__lowercase , dtype=torch.floataa ) # env.reset()
__a = torch.tensor(
[[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=__lowercase )
__a = torch.tensor(__lowercase , device=__lowercase , dtype=torch.floataa ).reshape(1 , 1 , 1 )
__a = state
__a = torch.zeros(1 , 0 , config.act_dim , device=__lowercase , dtype=torch.floataa )
__a = torch.zeros(1 , 0 , device=__lowercase , dtype=torch.floataa )
__a = torch.tensor(0 , device=__lowercase , dtype=torch.long ).reshape(1 , 1 )
for step in range(__lowercase ):
__a = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=__lowercase )] , dim=1 )
__a = torch.cat([rewards, torch.zeros(1 , 1 , device=__lowercase )] , dim=1 )
__a = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
__a , __a , __a = model(
states=__lowercase , actions=__lowercase , rewards=__lowercase , returns_to_go=__lowercase , timesteps=__lowercase , attention_mask=__lowercase , return_dict=__lowercase , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
__a , __a , __a , __a = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=__lowercase , dtype=torch.floataa ),
1.0,
False,
{},
)
__a = action_pred[0, -1]
__a = torch.cat([states, state] , dim=1 )
__a = returns_to_go[0, -1] - reward
__a = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
__a = torch.cat(
[timesteps, torch.ones((1, 1) , device=__lowercase , dtype=torch.long ) * (step + 1)] , dim=1 )
| 225 |
import random
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
__a = num - 1
__a = 0
while s % 2 == 0:
__a = s // 2
t += 1
for _ in range(5 ):
__a = random.randrange(2 , num - 1 )
__a = pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if v != 1:
__a = 0
while v != (num - 1):
if i == t - 1:
return False
else:
__a = i + 1
__a = (v**2) % num
return True
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if num < 2:
return False
__a = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int = 1024 ):
"""simple docstring"""
while True:
__a = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(_SCREAMING_SNAKE_CASE ):
return num
if __name__ == "__main__":
lowerCamelCase__ = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 225 | 1 |
'''simple docstring'''
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
lowercase : str = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE = 101 ) -> str:
"""simple docstring"""
A : Any = length
def __len__( self ) -> Union[str, Any]:
"""simple docstring"""
return self.length
def __getitem__( self , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return i
class A :
def __call__( self , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return {"input_ids": torch.tensor(SCREAMING_SNAKE_CASE ), "labels": torch.tensor(SCREAMING_SNAKE_CASE )}
class A ( nn.Module ):
def __init__( self ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
# Add some (unused) params otherwise DDP will complain.
A : List[str] = nn.Linear(120 , 80 )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> int:
"""simple docstring"""
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class A ( __snake_case ):
@require_torch_neuroncore
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Optional[int] = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
A : Union[str, Any] = self.get_auto_remove_tmp_dir()
A : int = F'--output_dir {output_dir}'.split()
A : List[Any] = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(SCREAMING_SNAKE_CASE , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class A ( __snake_case ):
@require_torch_multi_gpu
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Any = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
A : Any = self.get_auto_remove_tmp_dir()
A : Union[str, Any] = F'--output_dir {output_dir}'.split()
A : List[str] = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(SCREAMING_SNAKE_CASE , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
lowercase : int = HfArgumentParser((TrainingArguments,))
lowercase : int = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_01, 40, 7]:
lowercase : Optional[Any] = DummyDataset(dataset_length)
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Tuple = list(range(len(snake_case__ ) ) )
A : Tuple = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
F'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
lowercase : Tuple = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
lowercase : Any = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowercase : int = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowercase : Optional[Any] = 2
lowercase : str = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowercase : Optional[Any] = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowercase : List[Any] = None
| 343 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase : Optional[int] = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : str = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 343 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_lowercase = logging.get_logger(__name__)
def A (__lowerCamelCase :Union[tf.Tensor, np.ndarray] ):
if isinstance(__lowerCamelCase , np.ndarray ):
return list(tensor.shape )
_lowerCAmelCase = tf.shape(__lowerCamelCase )
if tensor.shape == tf.TensorShape(__lowerCamelCase ):
return dynamic
_lowerCAmelCase = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(__lowerCamelCase )]
def A (__lowerCamelCase :tf.Tensor , __lowerCamelCase :Optional[int] = None , __lowerCamelCase :Optional[str] = None ):
return tf.nn.softmax(logits=logits + 1e-9 , axis=__lowerCamelCase , name=__lowerCamelCase )
def A (__lowerCamelCase :List[str] , __lowerCamelCase :Union[str, Any] , __lowerCamelCase :Union[str, Any] , __lowerCamelCase :Optional[int]=1e-5 , __lowerCamelCase :str=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" )
# Get mean and variance on the axis to be normalized
_lowerCAmelCase , _lowerCAmelCase = tf.nn.moments(__lowerCamelCase , axes=[axis] , keepdims=__lowerCamelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
_lowerCAmelCase = [1] * inputs.shape.rank
_lowerCAmelCase = shape_list(__lowerCamelCase )[axis]
_lowerCAmelCase = tf.reshape(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = tf.reshape(__lowerCamelCase , __lowerCamelCase )
# Compute layer normalization using the batch_normalization
# function.
_lowerCAmelCase = tf.nn.batch_normalization(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , offset=__lowerCamelCase , scale=__lowerCamelCase , variance_epsilon=__lowerCamelCase , )
return outputs
def A (__lowerCamelCase :Dict , __lowerCamelCase :int=0 , __lowerCamelCase :str=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
_lowerCAmelCase = tf.shape(__lowerCamelCase )
_lowerCAmelCase = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
_lowerCAmelCase = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(__lowerCamelCase , __lowerCamelCase )
def A (__lowerCamelCase :tf.Tensor ):
if not isinstance(__lowerCamelCase , tf.Tensor ):
_lowerCAmelCase = tf.convert_to_tensor(__lowerCamelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
_lowerCAmelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
_lowerCAmelCase = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
_lowerCAmelCase = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def A (__lowerCamelCase :tf.Tensor , __lowerCamelCase :int , __lowerCamelCase :str = "input_ids" ):
tf.debugging.assert_less(
__lowerCamelCase , tf.cast(__lowerCamelCase , dtype=tensor.dtype ) , message=(
f'The maximum value of {tensor_name} ({tf.math.reduce_max(__lowerCamelCase )}) must be smaller than the embedding '
f'layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'
) , )
def A (__lowerCamelCase :List[str] , __lowerCamelCase :str , __lowerCamelCase :List[str] ):
_lowerCAmelCase = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
_lowerCAmelCase = [x for x in data if len(__lowerCamelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"""The following attributes cannot be saved to HDF5 file because """
f'they are larger than {HDF5_OBJECT_HEADER_LIMIT} '
f'bytes: {bad_attributes}' )
_lowerCAmelCase = np.asarray(__lowerCamelCase )
_lowerCAmelCase = 1
_lowerCAmelCase = np.array_split(__lowerCamelCase , __lowerCamelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
_lowerCAmelCase = np.array_split(__lowerCamelCase , __lowerCamelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(__lowerCamelCase ):
_lowerCAmelCase = chunk_data
else:
_lowerCAmelCase = data
def A (__lowerCamelCase :List[Any] , __lowerCamelCase :Any ):
if name in group.attrs:
_lowerCAmelCase = [n.decode("""utf8""" ) if hasattr(__lowerCamelCase , """decode""" ) else n for n in group.attrs[name]]
else:
_lowerCAmelCase = []
_lowerCAmelCase = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("""utf8""" ) if hasattr(__lowerCamelCase , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] )
chunk_id += 1
return data
def A (__lowerCamelCase :Union[str, Any] ):
def _expand_single_ad_tensor(__lowerCamelCase :List[Any] ):
if isinstance(__lowerCamelCase , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(__lowerCamelCase , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , __lowerCamelCase )
| 5 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
lowercase = logging.getLogger(__name__)
lowercase = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
lowercase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __A:
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(UpperCAmelCase )} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
SCREAMING_SNAKE_CASE = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def lowercase__ ( self : str ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class __A:
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
SCREAMING_SNAKE_CASE = field(default=UpperCAmelCase , metadata={'''help''': '''The input training data file (a text file).'''} )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
SCREAMING_SNAKE_CASE = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
SCREAMING_SNAKE_CASE = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def lowercase__ ( self : Tuple ):
if self.train_file is not None:
lowerCamelCase_ = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
lowerCamelCase_ = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def __lowerCAmelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple ) -> Dict:
with open(UpperCAmelCase__ , """r""" , encoding="""utf-8""" ) as f:
lowerCamelCase_ = [json.loads(UpperCAmelCase__ ) for line in f.read().splitlines() if (len(UpperCAmelCase__ ) > 0 and not line.isspace())]
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
lowerCamelCase_ = {c: dataset[c] for c in dataset.column_names}
lowerCamelCase_ = refs
return Dataset.from_dict(UpperCAmelCase__ )
def __lowerCAmelCase ( ) -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCAmelCase__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
lowerCamelCase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''train[:{data_args.validation_split_percentage}%]''' , )
lowerCamelCase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''train[{data_args.validation_split_percentage}%:]''' , )
else:
lowerCamelCase_ = {}
if data_args.train_file is not None:
lowerCamelCase_ = data_args.train_file
if data_args.validation_file is not None:
lowerCamelCase_ = data_args.validation_file
lowerCamelCase_ = data_args.train_file.split(""".""" )[-1]
if extension == "txt":
lowerCamelCase_ = """text"""
lowerCamelCase_ = load_dataset(UpperCAmelCase__ , data_files=UpperCAmelCase__ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowerCamelCase_ = AutoConfig.from_pretrained(model_args.config_name , **UpperCAmelCase__ )
elif model_args.model_name_or_path:
lowerCamelCase_ = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
else:
lowerCamelCase_ = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
lowerCamelCase_ = {
"""cache_dir""": model_args.cache_dir,
"""use_fast""": model_args.use_fast_tokenizer,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
lowerCamelCase_ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **UpperCAmelCase__ )
elif model_args.model_name_or_path:
lowerCamelCase_ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
lowerCamelCase_ = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
lowerCamelCase_ = AutoModelForMaskedLM.from_config(UpperCAmelCase__ )
model.resize_token_embeddings(len(UpperCAmelCase__ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
lowerCamelCase_ = datasets["""train"""].column_names
else:
lowerCamelCase_ = datasets["""validation"""].column_names
lowerCamelCase_ = """text""" if """text""" in column_names else column_names[0]
lowerCamelCase_ = """max_length""" if data_args.pad_to_max_length else False
def tokenize_function(UpperCAmelCase__ : Any ):
# Remove empty lines
lowerCamelCase_ = [line for line in examples["""text"""] if len(UpperCAmelCase__ ) > 0 and not line.isspace()]
return tokenizer(examples["""text"""] , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=data_args.max_seq_length )
lowerCamelCase_ = datasets.map(
UpperCAmelCase__ , batched=UpperCAmelCase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
lowerCamelCase_ = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
lowerCamelCase_ = add_chinese_references(
tokenized_datasets["""validation"""] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
lowerCamelCase_ = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
lowerCamelCase_ = False
# Data collator
# This one will take care of randomly masking the tokens.
lowerCamelCase_ = DataCollatorForWholeWordMask(tokenizer=UpperCAmelCase__ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowerCamelCase_ = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowerCamelCase_ = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
lowerCamelCase_ = model_args.model_name_or_path
else:
lowerCamelCase_ = None
lowerCamelCase_ = trainer.train(resume_from_checkpoint=UpperCAmelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase_ = os.path.join(training_args.output_dir , """train_results.txt""" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , """w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
lowerCamelCase_ = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowerCamelCase_ = trainer.evaluate()
lowerCamelCase_ = math.exp(eval_output["""eval_loss"""] )
lowerCamelCase_ = perplexity
lowerCamelCase_ = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
return results
def __lowerCAmelCase ( UpperCAmelCase__ : Optional[Any] ) -> Optional[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 272 | 0 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCAmelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowerCAmelCase_ = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class lowerCAmelCase ( unittest.TestCase ):
def __A ( self ):
_UpperCAmelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , 'models/bert/' ) )
_UpperCAmelCase = self.transformer_dir
shutil.copy(
os.path.join(__A , 'src/transformers/models/bert/modeling_bert.py' ) , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py' ) , )
def __A ( self ):
_UpperCAmelCase = 'src/transformers'
shutil.rmtree(self.transformer_dir )
def __A ( self , a__ , a__ , a__ , a__=None ):
_UpperCAmelCase = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCAmelCase = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
_UpperCAmelCase = black.format_str(__A , mode=__A )
_UpperCAmelCase = os.path.join(self.transformer_dir , 'new_code.py' )
with open(__A , 'w' , newline='\n' ) as f:
f.write(__A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__A )
with open(__A , 'r' ) as f:
self.assertTrue(f.read() , __A )
def __A ( self ):
_UpperCAmelCase = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' )
self.assertEqual(__A , __A )
def __A ( self ):
# Base copy consistency
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , __A , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , __A ) , )
# Copy consistency with a really long name
_UpperCAmelCase = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , f"""{long_class_name}LMPredictionHead""" , re.sub('Bert' , __A , __A ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , __A , overwrite_result=re.sub('Bert' , 'TestModel' , __A ) , )
def __A ( self ):
_UpperCAmelCase = check_copies.LOCALIZED_READMES['README_zh-hans.md']
_UpperCAmelCase = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
_UpperCAmelCase = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
_UpperCAmelCase = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
_UpperCAmelCase , _UpperCAmelCase = check_copies.convert_to_localized_md(
__A , __A , localized_readme['format_model_list'] )
self.assertFalse(__A )
self.assertEqual(__A , __A )
_UpperCAmelCase , _UpperCAmelCase = check_copies.convert_to_localized_md(
__A , __A , localized_readme['format_model_list'] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(__A )
_UpperCAmelCase = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
_UpperCAmelCase = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
_UpperCAmelCase = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
_UpperCAmelCase , _UpperCAmelCase = check_copies.convert_to_localized_md(
__A , __A , localized_readme['format_model_list'] )
# Check if the model link is synchronized.
self.assertEqual(__A , __A )
| 714 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase_ = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase_ = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase_ = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase_ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
lowerCAmelCase_ = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
lowerCAmelCase_ = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
lowerCAmelCase_ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCAmelCase_ = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCAmelCase_ = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class lowerCAmelCase ( snake_case ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = DPRContextEncoderTokenizer
class lowerCAmelCase ( snake_case ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = DPRQuestionEncoderTokenizer
lowerCAmelCase_ = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
lowerCAmelCase_ = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
lowerCAmelCase_ = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(snake_case )
class lowerCAmelCase :
def __call__( self , a__ , a__ = None , a__ = None , a__ = False , a__ = False , a__ = None , a__ = None , a__ = None , **a__ , ):
if titles is None and texts is None:
return super().__call__(
a__ , padding=a__ , truncation=a__ , max_length=a__ , return_tensors=a__ , return_attention_mask=a__ , **a__ , )
elif titles is None or texts is None:
_UpperCAmelCase = titles if texts is None else texts
return super().__call__(
a__ , a__ , padding=a__ , truncation=a__ , max_length=a__ , return_tensors=a__ , return_attention_mask=a__ , **a__ , )
_UpperCAmelCase = titles if not isinstance(a__ , a__ ) else [titles]
_UpperCAmelCase = texts if not isinstance(a__ , a__ ) else [texts]
_UpperCAmelCase = len(a__ )
_UpperCAmelCase = questions if not isinstance(a__ , a__ ) else [questions] * n_passages
assert len(a__ ) == len(
a__ ), f"""There should be as many titles than texts but got {len(a__ )} titles and {len(a__ )} texts."""
_UpperCAmelCase = super().__call__(a__ , a__ , padding=a__ , truncation=a__ )['input_ids']
_UpperCAmelCase = super().__call__(a__ , add_special_tokens=a__ , padding=a__ , truncation=a__ )['input_ids']
_UpperCAmelCase = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(a__ , a__ )
]
}
if return_attention_mask is not False:
_UpperCAmelCase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_UpperCAmelCase = attention_mask
return self.pad(a__ , padding=a__ , max_length=a__ , return_tensors=a__ )
def __A ( self , a__ , a__ , a__ = 16 , a__ = 64 , a__ = 4 , ):
_UpperCAmelCase = reader_input['input_ids']
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = reader_output[:3]
_UpperCAmelCase = len(a__ )
_UpperCAmelCase = sorted(range(a__ ) , reverse=a__ , key=relevance_logits.__getitem__ )
_UpperCAmelCase = []
for doc_id in sorted_docs:
_UpperCAmelCase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_UpperCAmelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_UpperCAmelCase = sequence_ids.index(self.pad_token_id )
else:
_UpperCAmelCase = len(a__ )
_UpperCAmelCase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=a__ , top_spans=a__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=a__ , start_index=a__ , end_index=a__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(a__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __A ( self , a__ , a__ , a__ , a__ , ):
_UpperCAmelCase = []
for start_index, start_score in enumerate(a__ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_UpperCAmelCase = sorted(a__ , key=lambda a__ : x[1] , reverse=a__ )
_UpperCAmelCase = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
_UpperCAmelCase = end_index - start_index + 1
assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(a__ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(snake_case )
class lowerCAmelCase ( snake_case , snake_case ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = ["""input_ids""", """attention_mask"""]
lowerCAmelCase__ = DPRReaderTokenizer
| 494 | 0 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
a : Dict = logging.getLogger()
def lowercase_ ( _UpperCamelCase ):
'''simple docstring'''
__lowercase = {}
__lowercase = os.path.join(_UpperCamelCase , '''all_results.json''' )
if os.path.exists(_UpperCamelCase ):
with open(_UpperCamelCase , '''r''' ) as f:
__lowercase = json.load(_UpperCamelCase )
else:
raise ValueError(F'can\'t find {path}' )
return results
a : Optional[int] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowerCamelCase_ ( lowerCAmelCase__ ):
'''simple docstring'''
def A ( self ) -> Optional[int]:
'''simple docstring'''
import xla_spawn
__lowercase = self.get_auto_remove_tmp_dir()
__lowercase = F'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(snake_case_ , '''argv''' , snake_case_ ):
__lowercase = time()
xla_spawn.main()
__lowercase = time()
__lowercase = get_results(snake_case_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_0_0 )
def A ( self ) -> List[str]:
'''simple docstring'''
import xla_spawn
__lowercase = '''
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
'''.split()
with patch.object(snake_case_ , '''argv''' , snake_case_ ):
xla_spawn.main()
| 639 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCamelCase_ ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = False , snake_case_ = False , snake_case_ = None , **snake_case_ , ) -> Dict:
'''simple docstring'''
super().__init__(
snake_case_ , split=snake_case_ , features=snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ , streaming=snake_case_ , num_proc=snake_case_ , **snake_case_ , )
__lowercase = path_or_paths if isinstance(snake_case_ , snake_case_ ) else {self.split: path_or_paths}
__lowercase = Text(
cache_dir=snake_case_ , data_files=snake_case_ , features=snake_case_ , **snake_case_ , )
def A ( self ) -> int:
'''simple docstring'''
if self.streaming:
__lowercase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowercase = None
__lowercase = None
__lowercase = None
__lowercase = None
self.builder.download_and_prepare(
download_config=snake_case_ , download_mode=snake_case_ , verification_mode=snake_case_ , base_path=snake_case_ , num_proc=self.num_proc , )
__lowercase = self.builder.as_dataset(
split=self.split , verification_mode=snake_case_ , in_memory=self.keep_in_memory )
return dataset
| 639 | 1 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
a__: Any = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
print('Googling.....')
a__: int = F"https://www.google.com/search?q={query}&num=100"
a__: List[Any] = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
a__: Optional[Any] = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
a__: str = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
| 721 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a__: Any = random.Random()
def UpperCamelCase__( UpperCamelCase__ : Dict , UpperCamelCase__ : str=1.0 , UpperCamelCase__ : str=None , UpperCamelCase__ : Tuple=None )->Any:
if rng is None:
A__ = global_rng
A__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self,__lowerCamelCase,__lowerCamelCase=7,__lowerCamelCase=400,__lowerCamelCase=2000,__lowerCamelCase=2048,__lowerCamelCase=128,__lowerCamelCase=1,__lowerCamelCase=512,__lowerCamelCase=30,__lowerCamelCase=4_4100,):
A__ = parent
A__ = batch_size
A__ = min_seq_length
A__ = max_seq_length
A__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ = spectrogram_length
A__ = feature_size
A__ = num_audio_channels
A__ = hop_length
A__ = chunk_length
A__ = sampling_rate
def UpperCamelCase ( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCamelCase ( self,__lowerCamelCase=False,__lowerCamelCase=False ):
def _flatten(__lowerCamelCase ):
return list(itertools.chain(*__lowerCamelCase ) )
if equal_length:
A__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
A__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length,self.max_seq_length,self.seq_length_diff )
]
if numpify:
A__ = [np.asarray(__lowerCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = TvltFeatureExtractor
def UpperCamelCase ( self ):
A__ = TvltFeatureExtractionTester(self )
def UpperCamelCase ( self ):
A__ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__lowerCamelCase,'''spectrogram_length''' ) )
self.assertTrue(hasattr(__lowerCamelCase,'''feature_size''' ) )
self.assertTrue(hasattr(__lowerCamelCase,'''num_audio_channels''' ) )
self.assertTrue(hasattr(__lowerCamelCase,'''hop_length''' ) )
self.assertTrue(hasattr(__lowerCamelCase,'''chunk_length''' ) )
self.assertTrue(hasattr(__lowerCamelCase,'''sampling_rate''' ) )
def UpperCamelCase ( self ):
A__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = feat_extract_first.save_pretrained(__lowerCamelCase )[0]
check_json_file_has_correct_format(__lowerCamelCase )
A__ = self.feature_extraction_class.from_pretrained(__lowerCamelCase )
A__ = feat_extract_first.to_dict()
A__ = feat_extract_second.to_dict()
A__ = dict_first.pop('''mel_filters''' )
A__ = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(__lowerCamelCase,__lowerCamelCase ) )
self.assertEqual(__lowerCamelCase,__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = os.path.join(__lowerCamelCase,'''feat_extract.json''' )
feat_extract_first.to_json_file(__lowerCamelCase )
A__ = self.feature_extraction_class.from_json_file(__lowerCamelCase )
A__ = feat_extract_first.to_dict()
A__ = feat_extract_second.to_dict()
A__ = dict_first.pop('''mel_filters''' )
A__ = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(__lowerCamelCase,__lowerCamelCase ) )
self.assertEqual(__lowerCamelCase,__lowerCamelCase )
def UpperCamelCase ( self ):
# Initialize feature_extractor
A__ = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
A__ = [floats_list((1, x) )[0] for x in range(800,1400,200 )]
A__ = [np.asarray(__lowerCamelCase ) for speech_input in speech_inputs]
# Test not batched input
A__ = feature_extractor(np_speech_inputs[0],return_tensors='''np''',sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
A__ = feature_extractor(__lowerCamelCase,return_tensors='''np''',sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
A__ = feature_extractor(
__lowerCamelCase,return_tensors='''np''',sampling_rate=4_4100,mask_audio=__lowerCamelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
A__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A__ = np.asarray(__lowerCamelCase )
A__ = feature_extractor(__lowerCamelCase,return_tensors='''np''',sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def UpperCamelCase ( self,__lowerCamelCase ):
A__ = load_dataset('''hf-internal-testing/librispeech_asr_dummy''','''clean''',split='''validation''' )
# automatic decoding with librispeech
A__ = ds.sort('''id''' ).select(range(__lowerCamelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def UpperCamelCase ( self ):
A__ = self._load_datasamples(1 )
A__ = TvltFeatureExtractor()
A__ = feature_extractor(__lowerCamelCase,return_tensors='''pt''' ).audio_values
self.assertEquals(audio_values.shape,(1, 1, 192, 128) )
A__ = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2],__lowerCamelCase,atol=1E-4 ) )
| 212 | 0 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
_SCREAMING_SNAKE_CASE : int = NewType("DataClass", Any)
_SCREAMING_SNAKE_CASE : Optional[Any] = NewType("DataClassType", Any)
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
if isinstance(UpperCamelCase_ ,UpperCamelCase_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
snake_case = {str(UpperCamelCase_ ): choice for choice in choices}
return lambda UpperCamelCase_ : str_to_choice.get(UpperCamelCase_ ,UpperCamelCase_ )
def UpperCAmelCase__ (*,
UpperCamelCase_ = None ,UpperCamelCase_ = None ,UpperCamelCase_ = dataclasses.MISSING ,UpperCamelCase_ = dataclasses.MISSING ,UpperCamelCase_ = None ,**UpperCamelCase_ ,):
"""simple docstring"""
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
snake_case = {}
if aliases is not None:
snake_case = aliases
if help is not None:
snake_case = help
return dataclasses.field(metadata=UpperCamelCase_ ,default=UpperCamelCase_ ,default_factory=UpperCamelCase_ ,**UpperCamelCase_ )
class A__ ( snake_case__ ):
"""simple docstring"""
__magic_name__ = 42
def __init__( self , __snake_case , **__snake_case ):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
snake_case = ArgumentDefaultsHelpFormatter
super().__init__(**__snake_case )
if dataclasses.is_dataclass(__snake_case ):
snake_case = [dataclass_types]
snake_case = list(__snake_case )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__snake_case )
@staticmethod
def a_ ( __snake_case , __snake_case ):
snake_case = F'''--{field.name}'''
snake_case = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __snake_case ):
raise RuntimeError(
'''Unresolved type detected, which should have been done with the help of '''
'''`typing.get_type_hints` method by default''' )
snake_case = kwargs.pop('''aliases''' , [] )
if isinstance(__snake_case , __snake_case ):
snake_case = [aliases]
snake_case = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(__snake_case , '''UnionType''' ) and isinstance(__snake_case , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__snake_case ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
F''' Problem encountered in field \'{field.name}\'.''' )
if type(__snake_case ) not in field.type.__args__:
# filter `str` in Union
snake_case = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
snake_case = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
snake_case = (
field.type.__args__[0] if isinstance(__snake_case , field.type.__args__[1] ) else field.type.__args__[1]
)
snake_case = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
snake_case = {}
if origin_type is Literal or (isinstance(field.type , __snake_case ) and issubclass(field.type , __snake_case )):
if origin_type is Literal:
snake_case = field.type.__args__
else:
snake_case = [x.value for x in field.type]
snake_case = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
snake_case = field.default
else:
snake_case = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
snake_case = copy(__snake_case )
# Hack because type=bool in argparse does not behave as we want.
snake_case = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
snake_case = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
snake_case = default
# This tells argparse we accept 0 or 1 value after --field_name
snake_case = '''?'''
# This is the value that will get picked if we do --field_name (without value)
snake_case = True
elif isclass(__snake_case ) and issubclass(__snake_case , __snake_case ):
snake_case = field.type.__args__[0]
snake_case = '''+'''
if field.default_factory is not dataclasses.MISSING:
snake_case = field.default_factory()
elif field.default is dataclasses.MISSING:
snake_case = True
else:
snake_case = field.type
if field.default is not dataclasses.MISSING:
snake_case = field.default
elif field.default_factory is not dataclasses.MISSING:
snake_case = field.default_factory()
else:
snake_case = True
parser.add_argument(__snake_case , *__snake_case , **__snake_case )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
snake_case = False
parser.add_argument(F'''--no_{field.name}''' , action='''store_false''' , dest=field.name , **__snake_case )
def a_ ( self , __snake_case ):
if hasattr(__snake_case , '''_argument_group_name''' ):
snake_case = self.add_argument_group(dtype._argument_group_name )
else:
snake_case = self
try:
snake_case = get_type_hints(__snake_case )
except NameError:
raise RuntimeError(
F'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
'''removing line of `from __future__ import annotations` which opts in Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(__snake_case ):
snake_case = '''.'''.join(map(__snake_case , sys.version_info[:3] ) )
raise RuntimeError(
F'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
'''line of `from __future__ import annotations` which opts in union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
'''support Python versions that lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(__snake_case ):
if not field.init:
continue
snake_case = type_hints[field.name]
self._parse_dataclass_field(__snake_case , __snake_case )
def a_ ( self , __snake_case=None , __snake_case=False , __snake_case=True , __snake_case=None , __snake_case=None , ):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
snake_case = []
if args_filename:
args_files.append(Path(__snake_case ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
snake_case = ArgumentParser()
args_file_parser.add_argument(__snake_case , type=__snake_case , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
snake_case , snake_case = args_file_parser.parse_known_args(args=__snake_case )
snake_case = vars(__snake_case ).get(args_file_flag.lstrip('''-''' ) , __snake_case )
if cmd_args_file_paths:
args_files.extend([Path(__snake_case ) for p in cmd_args_file_paths] )
snake_case = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
snake_case = file_args + args if args is not None else file_args + sys.argv[1:]
snake_case , snake_case = self.parse_known_args(args=__snake_case )
snake_case = []
for dtype in self.dataclass_types:
snake_case = {f.name for f in dataclasses.fields(__snake_case ) if f.init}
snake_case = {k: v for k, v in vars(__snake_case ).items() if k in keys}
for k in keys:
delattr(__snake_case , __snake_case )
snake_case = dtype(**__snake_case )
outputs.append(__snake_case )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__snake_case )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
return (*outputs,)
def a_ ( self , __snake_case , __snake_case = False ):
snake_case = set(args.keys() )
snake_case = []
for dtype in self.dataclass_types:
snake_case = {f.name for f in dataclasses.fields(__snake_case ) if f.init}
snake_case = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
snake_case = dtype(**__snake_case )
outputs.append(__snake_case )
if not allow_extra_keys and unused_keys:
raise ValueError(F'''Some keys are not used by the HfArgumentParser: {sorted(__snake_case )}''' )
return tuple(__snake_case )
def a_ ( self , __snake_case , __snake_case = False ):
with open(Path(__snake_case ) , encoding='''utf-8''' ) as open_json_file:
snake_case = json.loads(open_json_file.read() )
snake_case = self.parse_dict(__snake_case , allow_extra_keys=__snake_case )
return tuple(__snake_case )
def a_ ( self , __snake_case , __snake_case = False ):
snake_case = self.parse_dict(yaml.safe_load(Path(__snake_case ).read_text() ) , allow_extra_keys=__snake_case )
return tuple(__snake_case )
| 550 |
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=snake_case__ ):
"""simple docstring"""
__magic_name__ = ['flax', 'transformers']
def __init__( self , *__snake_case , **__snake_case ):
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def a_ ( cls , *__snake_case , **__snake_case ):
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def a_ ( cls , *__snake_case , **__snake_case ):
requires_backends(cls , ['''flax''', '''transformers'''] )
class A__ ( metaclass=snake_case__ ):
"""simple docstring"""
__magic_name__ = ['flax', 'transformers']
def __init__( self , *__snake_case , **__snake_case ):
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def a_ ( cls , *__snake_case , **__snake_case ):
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def a_ ( cls , *__snake_case , **__snake_case ):
requires_backends(cls , ['''flax''', '''transformers'''] )
class A__ ( metaclass=snake_case__ ):
"""simple docstring"""
__magic_name__ = ['flax', 'transformers']
def __init__( self , *__snake_case , **__snake_case ):
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def a_ ( cls , *__snake_case , **__snake_case ):
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def a_ ( cls , *__snake_case , **__snake_case ):
requires_backends(cls , ['''flax''', '''transformers'''] )
class A__ ( metaclass=snake_case__ ):
"""simple docstring"""
__magic_name__ = ['flax', 'transformers']
def __init__( self , *__snake_case , **__snake_case ):
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def a_ ( cls , *__snake_case , **__snake_case ):
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def a_ ( cls , *__snake_case , **__snake_case ):
requires_backends(cls , ['''flax''', '''transformers'''] )
| 550 | 1 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_A : List[Any] = logging.getLogger(__name__)
class a ( a__ ):
UpperCAmelCase__ : List[str] = "summarization"
UpperCAmelCase__ : Optional[Any] = ["loss"]
UpperCAmelCase__ : List[Any] = ROUGE_KEYS
UpperCAmelCase__ : List[str] = "rouge2"
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
if hparams.sortish_sampler and hparams.gpus > 1:
__lowerCamelCase: Tuple = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(lowerCamelCase_ , num_labels=lowerCamelCase_ , mode=self.mode , **lowerCamelCase_ )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
__lowerCamelCase: Optional[Any] = Path(self.output_dir ) / """metrics.json"""
__lowerCamelCase: str = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
__lowerCamelCase: Tuple = 0
__lowerCamelCase: Tuple = defaultdict(lowerCamelCase_ )
__lowerCamelCase: List[str] = self.config.model_type
__lowerCamelCase: Tuple = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
__lowerCamelCase: Union[str, Any] = {
"""data_dir""": self.hparams.data_dir,
"""max_source_length""": self.hparams.max_source_length,
"""prefix""": self.model.config.prefix or """""",
}
__lowerCamelCase: List[Any] = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
__lowerCamelCase: Union[str, Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
__lowerCamelCase: Optional[Any] = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], F'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
__lowerCamelCase: Optional[int] = get_git_info()["""repo_sha"""]
__lowerCamelCase: Tuple = hparams.num_workers
__lowerCamelCase: Union[str, Any] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowerCamelCase_ ):
__lowerCamelCase: Union[str, Any] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
__lowerCamelCase: int = self.decoder_start_token_id
__lowerCamelCase: Any = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
__lowerCamelCase: Any = False
__lowerCamelCase: Union[str, Any] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
__lowerCamelCase: Optional[Any] = self.hparams.eval_max_gen_length
else:
__lowerCamelCase: List[str] = self.model.config.max_length
__lowerCamelCase: List[str] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
__lowerCamelCase: Union[str, Any] = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(lowerCamelCase_ , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
__lowerCamelCase: List[str] = True
return readable_batch
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : List[Any] ):
return self.model(lowerCamelCase_ , **lowerCamelCase_ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple ):
__lowerCamelCase: Tuple = self.tokenizer.batch_decode(
lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ )
return lmap(str.strip , lowerCamelCase_ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
__lowerCamelCase: str = self.tokenizer.pad_token_id
__lowerCamelCase , __lowerCamelCase: Tuple = batch["""input_ids"""], batch["""attention_mask"""]
__lowerCamelCase: str = batch["""labels"""]
if isinstance(self.model , lowerCamelCase_ ):
__lowerCamelCase: str = self.model._shift_right(lowerCamelCase_ )
else:
__lowerCamelCase: int = shift_tokens_right(lowerCamelCase_ , lowerCamelCase_ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
__lowerCamelCase: Optional[int] = decoder_input_ids
self.save_readable_batch(lowerCamelCase_ )
__lowerCamelCase: str = self(lowerCamelCase_ , attention_mask=lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ , use_cache=lowerCamelCase_ )
__lowerCamelCase: Tuple = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
__lowerCamelCase: Tuple = nn.CrossEntropyLoss(ignore_index=lowerCamelCase_ )
assert lm_logits.shape[-1] == self.vocab_size
__lowerCamelCase: List[Any] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
__lowerCamelCase: Optional[int] = nn.functional.log_softmax(lowerCamelCase_ , dim=-1 )
__lowerCamelCase , __lowerCamelCase: Optional[int] = label_smoothed_nll_loss(
lowerCamelCase_ , lowerCamelCase_ , self.hparams.label_smoothing , ignore_index=lowerCamelCase_ )
return (loss,)
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
return self.tokenizer.pad_token_id
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
__lowerCamelCase: int = self._step(lowerCamelCase_ )
__lowerCamelCase: Optional[int] = dict(zip(self.loss_names , lowerCamelCase_ ) )
# tokens per batch
__lowerCamelCase: str = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
__lowerCamelCase: Any = batch["""input_ids"""].shape[0]
__lowerCamelCase: Any = batch["""input_ids"""].eq(self.pad ).sum()
__lowerCamelCase: Optional[Any] = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ):
return self._generative_step(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any="val" ):
self.step_count += 1
__lowerCamelCase: List[Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
__lowerCamelCase: List[Any] = losses["""loss"""]
__lowerCamelCase: List[str] = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
__lowerCamelCase: str = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
__lowerCamelCase: Optional[int] = torch.tensor(lowerCamelCase_ ).type_as(lowerCamelCase_ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowerCamelCase_ )
__lowerCamelCase: Dict = {F'{prefix}_avg_{k}': x for k, x in losses.items()}
__lowerCamelCase: Tuple = self.step_count
self.metrics[prefix].append(lowerCamelCase_ ) # callback writes this to self.metrics_save_path
__lowerCamelCase: List[Any] = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'{prefix}_loss': loss,
F'{prefix}_{self.val_metric}': metric_tensor,
}
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] ):
return calculate_rouge(lowerCamelCase_ , lowerCamelCase_ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
__lowerCamelCase: Tuple = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
__lowerCamelCase: Optional[Any] = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=lowerCamelCase_ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
__lowerCamelCase: Tuple = (time.time() - ta) / batch["""input_ids"""].shape[0]
__lowerCamelCase: int = self.ids_to_clean_text(lowerCamelCase_ )
__lowerCamelCase: Union[str, Any] = self.ids_to_clean_text(batch["""labels"""] )
__lowerCamelCase: List[Any] = self._step(lowerCamelCase_ )
__lowerCamelCase: Optional[Any] = dict(zip(self.loss_names , lowerCamelCase_ ) )
__lowerCamelCase: Any = self.calc_generative_metrics(lowerCamelCase_ , lowerCamelCase_ )
__lowerCamelCase: Union[str, Any] = np.mean(lmap(lowerCamelCase_ , lowerCamelCase_ ) )
base_metrics.update(gen_time=lowerCamelCase_ , gen_len=lowerCamelCase_ , preds=lowerCamelCase_ , target=lowerCamelCase_ , **lowerCamelCase_ )
return base_metrics
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict ):
return self._generative_step(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
return self.validation_epoch_end(lowerCamelCase_ , prefix="""test""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
__lowerCamelCase: Optional[Any] = self.n_obs[type_path]
__lowerCamelCase: str = self.target_lens[type_path]
__lowerCamelCase: List[str] = self.dataset_class(
self.tokenizer , type_path=lowerCamelCase_ , n_obs=lowerCamelCase_ , max_target_length=lowerCamelCase_ , **self.dataset_kwargs , )
return dataset
def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str = False ):
__lowerCamelCase: Optional[Any] = self.get_dataset(lowerCamelCase_ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
__lowerCamelCase: str = dataset.make_sortish_sampler(lowerCamelCase_ , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=dataset.collate_fn , shuffle=lowerCamelCase_ , num_workers=self.num_workers , sampler=lowerCamelCase_ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
__lowerCamelCase: Dict = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCamelCase_ , batch_sampler=lowerCamelCase_ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=dataset.collate_fn , shuffle=lowerCamelCase_ , num_workers=self.num_workers , sampler=lowerCamelCase_ , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
__lowerCamelCase: str = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=lowerCamelCase_ )
return dataloader
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict ):
BaseTransformer.add_model_specific_args(lowerCamelCase_ , lowerCamelCase_ )
add_generic_args(lowerCamelCase_ , lowerCamelCase_ )
parser.add_argument(
"""--max_source_length""" , default=1024 , type=lowerCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=56 , type=lowerCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=142 , type=lowerCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=142 , type=lowerCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=lowerCamelCase_ )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=lowerCamelCase_ )
parser.add_argument("""--max_tokens_per_batch""" , type=lowerCamelCase_ , default=lowerCamelCase_ )
parser.add_argument("""--logger_name""" , type=lowerCamelCase_ , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=lowerCamelCase_ , default=-1 , required=lowerCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=lowerCamelCase_ , default=500 , required=lowerCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=lowerCamelCase_ , default=-1 , required=lowerCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=lowerCamelCase_ , default="""summarization""" , required=lowerCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=lowerCamelCase_ , default=0.0 , required=lowerCamelCase_ )
parser.add_argument("""--src_lang""" , type=lowerCamelCase_ , default="""""" , required=lowerCamelCase_ )
parser.add_argument("""--tgt_lang""" , type=lowerCamelCase_ , default="""""" , required=lowerCamelCase_ )
parser.add_argument("""--eval_beams""" , type=lowerCamelCase_ , default=lowerCamelCase_ , required=lowerCamelCase_ )
parser.add_argument(
"""--val_metric""" , type=lowerCamelCase_ , default=lowerCamelCase_ , required=lowerCamelCase_ , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=lowerCamelCase_ , default=lowerCamelCase_ , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=lowerCamelCase_ , default=1 , required=lowerCamelCase_ , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=lowerCamelCase_ , default=-1 , required=lowerCamelCase_ , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class a ( a__ ):
UpperCAmelCase__ : Optional[int] = "translation"
UpperCAmelCase__ : Tuple = ["loss"]
UpperCAmelCase__ : str = ["bleu"]
UpperCAmelCase__ : Optional[int] = "bleu"
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Any ):
super().__init__(lowerCamelCase_ , **lowerCamelCase_ )
__lowerCamelCase: Any = hparams.src_lang
__lowerCamelCase: Union[str, Any] = hparams.tgt_lang
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple ):
return calculate_bleu(lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( snake_case : Dict , snake_case : Union[str, Any]=None ) -> SummarizationModule:
Path(args.output_dir ).mkdir(exist_ok=snake_case )
check_output_dir(snake_case , expected_items=3 )
if model is None:
if "summarization" in args.task:
__lowerCamelCase: Optional[int] = SummarizationModule(snake_case )
else:
__lowerCamelCase: int = TranslationModule(snake_case )
__lowerCamelCase: Optional[Any] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
__lowerCamelCase: Any = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
__lowerCamelCase: List[str] = os.environ.get("""WANDB_PROJECT""" , snake_case )
__lowerCamelCase: Any = WandbLogger(name=model.output_dir.name , project=snake_case )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
__lowerCamelCase: Tuple = WandbLogger(name=model.output_dir.name , project=f'hf_{dataset}' )
if args.early_stopping_patience >= 0:
__lowerCamelCase: str = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
__lowerCamelCase: Optional[Any] = False
__lowerCamelCase: Union[str, Any] = args.val_metric == """loss"""
__lowerCamelCase: Optional[int] = generic_train(
snake_case , snake_case , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , snake_case ) , early_stopping_callback=snake_case , logger=snake_case , )
pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
__lowerCamelCase: str = """"""
__lowerCamelCase: Tuple = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=snake_case ) )
if checkpoints:
__lowerCamelCase: Optional[Any] = checkpoints[-1]
__lowerCamelCase: Optional[Any] = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
_A : List[str] = argparse.ArgumentParser()
_A : List[Any] = pl.Trainer.add_argparse_args(parser)
_A : str = SummarizationModule.add_model_specific_args(parser, os.getcwd())
_A : Optional[Any] = parser.parse_args()
main(args)
| 710 |
from __future__ import annotations
from typing import Any
class a :
def __init__( self : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : float = 0 ):
__lowerCamelCase , __lowerCamelCase: Dict = row, column
__lowerCamelCase: Optional[Any] = [[default_value for c in range(SCREAMING_SNAKE_CASE_ )] for r in range(SCREAMING_SNAKE_CASE_ )]
def __str__( self : Union[str, Any] ):
__lowerCamelCase: str = F'Matrix consist of {self.row} rows and {self.column} columns\n'
# Make string identifier
__lowerCamelCase: Optional[int] = 0
for row_vector in self.array:
for obj in row_vector:
__lowerCamelCase: int = max(SCREAMING_SNAKE_CASE_ , len(str(SCREAMING_SNAKE_CASE_ ) ) )
__lowerCamelCase: Union[str, Any] = F'%{max_element_length}s'
# Make string and return
def single_line(SCREAMING_SNAKE_CASE_ : list[float] ) -> str:
nonlocal string_format_identifier
__lowerCamelCase: Any = """["""
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(SCREAMING_SNAKE_CASE_ ) for row_vector in self.array )
return s
def __repr__( self : Union[str, Any] ):
return str(self )
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : tuple[int, int] ):
if not (isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and len(SCREAMING_SNAKE_CASE_ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : tuple[int, int] ):
assert self.validate_indicies(SCREAMING_SNAKE_CASE_ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Any , SCREAMING_SNAKE_CASE_ : tuple[int, int] , SCREAMING_SNAKE_CASE_ : float ):
assert self.validate_indicies(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: List[Any] = value
def __add__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Matrix ):
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert self.row == another.row and self.column == another.column
# Add
__lowerCamelCase: Any = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__lowerCamelCase: Tuple = self[r, c] + another[r, c]
return result
def __neg__( self : Any ):
__lowerCamelCase: List[str] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__lowerCamelCase: int = -self[r, c]
return result
def __sub__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Matrix ):
return self + (-another)
def __mul__( self : Tuple , SCREAMING_SNAKE_CASE_ : int | float | Matrix ):
if isinstance(SCREAMING_SNAKE_CASE_ , (int, float) ): # Scalar multiplication
__lowerCamelCase: List[str] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__lowerCamelCase: List[Any] = self[r, c] * another
return result
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): # Matrix multiplication
assert self.column == another.row
__lowerCamelCase: Optional[int] = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
__lowerCamelCase: int = F'Unsupported type given for another ({type(SCREAMING_SNAKE_CASE_ )})'
raise TypeError(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
__lowerCamelCase: int = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
__lowerCamelCase: List[str] = self[r, c]
return result
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : Matrix , SCREAMING_SNAKE_CASE_ : Matrix ):
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
__lowerCamelCase: Optional[Any] = v.transpose()
__lowerCamelCase: Any = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def __lowerCAmelCase ( ) -> None:
# a^(-1)
__lowerCamelCase: int = Matrix(3 , 3 , 0 )
for i in range(3 ):
__lowerCamelCase: List[Any] = 1
print(f'a^(-1) is {ainv}' )
# u, v
__lowerCamelCase: List[str] = Matrix(3 , 1 , 0 )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase: Optional[Any] = 1, 2, -3
__lowerCamelCase: Dict = Matrix(3 , 1 , 0 )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase: Union[str, Any] = 4, -2, 5
print(f'u is {u}' )
print(f'v is {v}' )
print(f'uv^T is {u * v.transpose()}' )
# Sherman Morrison
print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(snake_case , snake_case )}' )
def __lowerCAmelCase ( ) -> None:
import doctest
doctest.testmod()
testa()
| 189 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( snake_case_ ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , ) -> int:
super().__init__()
self.register_modules(transformer=lowerCAmelCase_ , vae=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
# create a imagenet -> id dictionary for easier use
_SCREAMING_SNAKE_CASE : List[Any] = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(',' ):
_SCREAMING_SNAKE_CASE : Any = int(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : str = dict(sorted(self.labels.items() ) )
def A ( self , lowerCAmelCase_ ) -> List[int]:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_SCREAMING_SNAKE_CASE : List[str] = list(lowerCAmelCase_ )
for l in label:
if l not in self.labels:
raise ValueError(
F"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = 4.0 , lowerCAmelCase_ = None , lowerCAmelCase_ = 5_0 , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
_SCREAMING_SNAKE_CASE : int = len(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.transformer.config.sample_size
_SCREAMING_SNAKE_CASE : List[Any] = self.transformer.config.in_channels
_SCREAMING_SNAKE_CASE : List[Any] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowerCAmelCase_ , device=self.device , dtype=self.transformer.dtype , )
_SCREAMING_SNAKE_CASE : List[str] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
_SCREAMING_SNAKE_CASE : Tuple = torch.tensor(lowerCAmelCase_ , device=self.device ).reshape(-1 )
_SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([1_0_0_0] * batch_size , device=self.device )
_SCREAMING_SNAKE_CASE : List[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowerCAmelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
_SCREAMING_SNAKE_CASE : Union[str, Any] = latent_model_input[: len(lowerCAmelCase_ ) // 2]
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([half, half] , dim=0 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : List[str] = t
if not torch.is_tensor(lowerCAmelCase_ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
_SCREAMING_SNAKE_CASE : Optional[int] = latent_model_input.device.type == """mps"""
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_SCREAMING_SNAKE_CASE : Tuple = torch.floataa if is_mps else torch.floataa
else:
_SCREAMING_SNAKE_CASE : int = torch.intaa if is_mps else torch.intaa
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([timesteps] , dtype=lowerCAmelCase_ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
_SCREAMING_SNAKE_CASE : List[str] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_SCREAMING_SNAKE_CASE : Tuple = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
_SCREAMING_SNAKE_CASE : List[Any] = self.transformer(
lowerCAmelCase_ , timestep=lowerCAmelCase_ , class_labels=lowerCAmelCase_ ).sample
# perform guidance
if guidance_scale > 1:
_SCREAMING_SNAKE_CASE : Dict = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
_SCREAMING_SNAKE_CASE : Tuple = torch.split(lowerCAmelCase_ , len(lowerCAmelCase_ ) // 2 , dim=0 )
_SCREAMING_SNAKE_CASE : Tuple = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
_SCREAMING_SNAKE_CASE : Dict = torch.cat([half_eps, half_eps] , dim=0 )
_SCREAMING_SNAKE_CASE : Tuple = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.split(lowerCAmelCase_ , lowerCAmelCase_ , dim=1 )
else:
_SCREAMING_SNAKE_CASE : Any = noise_pred
# compute previous image: x_t -> x_t-1
_SCREAMING_SNAKE_CASE : Any = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample
if guidance_scale > 1:
_SCREAMING_SNAKE_CASE : List[str] = latent_model_input.chunk(2 , dim=0 )
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = latent_model_input
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents
_SCREAMING_SNAKE_CASE : List[str] = self.vae.decode(lowerCAmelCase_ ).sample
_SCREAMING_SNAKE_CASE : str = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_SCREAMING_SNAKE_CASE : str = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_SCREAMING_SNAKE_CASE : str = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
| 621 |
'''simple docstring'''
import math
def _SCREAMING_SNAKE_CASE ( lowerCamelCase__ : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCamelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _SCREAMING_SNAKE_CASE ( lowerCamelCase__ : float = 0.1 ):
'''simple docstring'''
A: Union[str, Any] = 3
A: Tuple = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(lowerCamelCase__ )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 135 | 0 |
'''simple docstring'''
from datetime import datetime
import requests
def snake_case_ (_a : str ):
UpperCAmelCase = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
UpperCAmelCase = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(_a ).content
if __name__ == "__main__":
A =input('Enter Video/IGTV url: ').strip()
A =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 703 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A =logging.get_logger(__name__)
A ={
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class _a ( __a ):
__a : List[Any] = """mgp-str"""
def __init__( self : str , lowercase : str=[32, 128] , lowercase : Optional[Any]=4 , lowercase : Optional[Any]=3 , lowercase : Dict=27 , lowercase : Any=38 , lowercase : int=50_257 , lowercase : List[str]=30_522 , lowercase : Optional[int]=768 , lowercase : List[Any]=12 , lowercase : Tuple=12 , lowercase : Optional[int]=4.0 , lowercase : Union[str, Any]=True , lowercase : str=False , lowercase : str=1E-5 , lowercase : Dict=0.0 , lowercase : Dict=0.0 , lowercase : Tuple=0.0 , lowercase : str=False , lowercase : Optional[Any]=0.02 , **lowercase : List[str] , ):
'''simple docstring'''
super().__init__(**lowercase )
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = max_token_length
UpperCAmelCase = num_character_labels
UpperCAmelCase = num_bpe_labels
UpperCAmelCase = num_wordpiece_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = mlp_ratio
UpperCAmelCase = distilled
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = drop_rate
UpperCAmelCase = qkv_bias
UpperCAmelCase = attn_drop_rate
UpperCAmelCase = drop_path_rate
UpperCAmelCase = output_aa_attentions
UpperCAmelCase = initializer_range
| 358 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def _snake_case (__lowercase , __lowercase , __lowercase):
UpperCamelCase_ = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
UpperCamelCase_ = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(__lowercase):
os.makedirs(__lowercase)
UpperCamelCase_ = model.state_dict()
def to_tf_var_name(__lowercase):
for patt, repl in iter(__lowercase):
UpperCamelCase_ = name.replace(__lowercase , __lowercase)
return f"""bert/{name}"""
def create_tf_var(__lowercase , __lowercase , __lowercase):
UpperCamelCase_ = tf.dtypes.as_dtype(tensor.dtype)
UpperCamelCase_ = tf.get_variable(dtype=__lowercase , shape=tensor.shape , name=__lowercase , initializer=tf.zeros_initializer())
session.run(tf.variables_initializer([tf_var]))
session.run(__lowercase)
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
UpperCamelCase_ = to_tf_var_name(__lowercase)
UpperCamelCase_ = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose):
UpperCamelCase_ = torch_tensor.T
UpperCamelCase_ = create_tf_var(tensor=__lowercase , name=__lowercase , session=__lowercase)
tf.keras.backend.set_value(__lowercase , __lowercase)
UpperCamelCase_ = session.run(__lowercase)
print(f"""Successfully created {tf_name}: {np.allclose(__lowercase , __lowercase)}""")
UpperCamelCase_ = tf.train.Saver(tf.trainable_variables())
saver.save(__lowercase , os.path.join(__lowercase , model_name.replace('-' , '_') + '.ckpt'))
def _snake_case (__lowercase=None):
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=__lowercase , required=__lowercase , help='model name e.g. bert-base-uncased')
parser.add_argument(
'--cache_dir' , type=__lowercase , default=__lowercase , required=__lowercase , help='Directory containing pytorch model')
parser.add_argument('--pytorch_model_path' , type=__lowercase , required=__lowercase , help='/path/to/<pytorch-model-name>.bin')
parser.add_argument('--tf_cache_dir' , type=__lowercase , required=__lowercase , help='Directory in which to save tensorflow model')
UpperCamelCase_ = parser.parse_args(__lowercase)
UpperCamelCase_ = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=__lowercase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name)
if __name__ == "__main__":
main()
| 23 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__A = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def lowercase__ ( A_: int , A_: Optional[Any] , A_: List[str]=None ) -> List[str]:
"""simple docstring"""
if rng is None:
__UpperCAmelCase =random.Random()
__UpperCAmelCase =1
for dim in shape:
total_dims *= dim
__UpperCAmelCase =[]
for _ in range(A_ ):
values.append(rng.randint(0 , vocab_size - 1 ) )
__UpperCAmelCase =np.array(A_ , dtype=jnp.intaa ).reshape(A_ )
return output
def lowercase__ ( A_: List[str] , A_: List[str]=None ) -> Any:
"""simple docstring"""
__UpperCAmelCase =ids_tensor(A_ , vocab_size=2 , rng=A_ )
# make sure that at least one token is attended to for each batch
__UpperCAmelCase =1
return attn_mask
@require_flax
class _A :
"""simple docstring"""
lowerCamelCase : Optional[Any] = None
lowerCamelCase : int = ()
def _a ( self : str ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
__UpperCAmelCase =2
__UpperCAmelCase =inputs["""input_ids"""].shape[-1] // 2
__UpperCAmelCase =inputs["""input_ids"""][:max_batch_size, :sequence_length]
__UpperCAmelCase =jnp.ones_like(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
__UpperCAmelCase =input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
__UpperCAmelCase =config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _a ( self : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =0
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =pt_model_class(__SCREAMING_SNAKE_CASE ).eval()
__UpperCAmelCase =load_flax_weights_in_pytorch_model(__SCREAMING_SNAKE_CASE , flax_model.params )
__UpperCAmelCase =flax_model.generate(__SCREAMING_SNAKE_CASE ).sequences
__UpperCAmelCase =pt_model.generate(torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
__UpperCAmelCase =flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def _a ( self : Optional[int] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Union[str, Any] ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =True
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : List[Any] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =2
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Any ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =False
__UpperCAmelCase =max_length
__UpperCAmelCase =2
__UpperCAmelCase =2
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def _a ( self : Union[str, Any] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =True
__UpperCAmelCase =max_length
__UpperCAmelCase =0.8
__UpperCAmelCase =10
__UpperCAmelCase =0.3
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =max_length
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Optional[int] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
__UpperCAmelCase =max_length
__UpperCAmelCase =2
__UpperCAmelCase =1
__UpperCAmelCase =8
__UpperCAmelCase =9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : List[str] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =False
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Dict ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =True
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _a ( self : Dict ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase =attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase =2
__UpperCAmelCase =max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase =model_class(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =model.generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertEqual(generation_outputs.shape[-1] , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =jit(model.generate )
__UpperCAmelCase =jit_generate(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : int ) -> Any:
__UpperCAmelCase =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
__UpperCAmelCase =FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
__UpperCAmelCase ="""Hello world"""
__UpperCAmelCase =tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """do_samples""" ):
model.generate(__SCREAMING_SNAKE_CASE , do_samples=__SCREAMING_SNAKE_CASE )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , """foo""" ):
__UpperCAmelCase ={"""foo""": """bar"""}
model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 68 | 0 |
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
UpperCamelCase__ = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
UpperCamelCase__ = {
'jukebox': 5_12,
}
class a ( lowercase ):
UpperCamelCase : Any = VOCAB_FILES_NAMES
UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : str = PRETRAINED_LYRIC_TOKENS_SIZES
UpperCamelCase : List[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=["v3", "v2", "v2"] , UpperCamelCase_=512 , UpperCamelCase_=5 , UpperCamelCase_="<|endoftext|>" , **UpperCamelCase_ , ):
UpperCAmelCase__ : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
super().__init__(
unk_token=UpperCamelCase_ , n_genres=UpperCamelCase_ , version=UpperCamelCase_ , max_n_lyric_tokens=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCAmelCase__ : int = version
UpperCAmelCase__ : Any = max_n_lyric_tokens
UpperCAmelCase__ : Any = n_genres
with open(UpperCamelCase_ , encoding='utf-8' ) as vocab_handle:
UpperCAmelCase__ : Optional[Any] = json.load(UpperCamelCase_ )
with open(UpperCamelCase_ , encoding='utf-8' ) as vocab_handle:
UpperCAmelCase__ : Dict = json.load(UpperCamelCase_ )
with open(UpperCamelCase_ , encoding='utf-8' ) as vocab_handle:
UpperCAmelCase__ : Tuple = json.load(UpperCamelCase_ )
UpperCAmelCase__ : List[str] = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 79:
UpperCAmelCase__ : Optional[Any] = oov.replace(R'\-\'' , R'\-+\'' )
UpperCAmelCase__ : List[Any] = regex.compile(UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = {v: k for k, v in self.artists_encoder.items()}
UpperCAmelCase__ : Dict = {v: k for k, v in self.genres_encoder.items()}
UpperCAmelCase__ : Tuple = {v: k for k, v in self.lyrics_encoder.items()}
@property
def __snake_case ( self ):
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def __snake_case ( self ):
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ : List[Any] = [self.artists_encoder.get(UpperCamelCase_ , 0 ) for artist in list_artists]
for genres in range(len(UpperCamelCase_ ) ):
UpperCAmelCase__ : int = [self.genres_encoder.get(UpperCamelCase_ , 0 ) for genre in list_genres[genres]]
UpperCAmelCase__ : Tuple = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
UpperCAmelCase__ : str = [[self.lyrics_encoder.get(UpperCamelCase_ , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def __snake_case ( self , UpperCamelCase_ ):
return list(UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.prepare_for_tokenization(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase__ : List[str] = self._tokenize(UpperCamelCase_ )
return artist, genre, lyrics
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False ):
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
UpperCAmelCase__ : Optional[Any] = artists[idx].lower()
UpperCAmelCase__ : Optional[int] = [genres[idx].lower()]
else:
UpperCAmelCase__ : Optional[int] = self._normalize(artists[idx] ) + '.v2'
UpperCAmelCase__ : List[str] = [
self._normalize(UpperCamelCase_ ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
UpperCAmelCase__ : Tuple = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
UpperCAmelCase__ : List[Any] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
UpperCAmelCase__ : Dict = {vocab[index]: index + 1 for index in range(len(UpperCamelCase_ ) )}
UpperCAmelCase__ : str = 0
UpperCAmelCase__ : Tuple = len(UpperCamelCase_ ) + 1
UpperCAmelCase__ : Optional[int] = self.vocab
UpperCAmelCase__ : Optional[Any] = {v: k for k, v in self.vocab.items()}
UpperCAmelCase__ : Any = ''
else:
UpperCAmelCase__ : Tuple = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
UpperCAmelCase__ : List[Any] = self._run_strip_accents(UpperCamelCase_ )
UpperCAmelCase__ : str = lyrics.replace('\\' , '\n' )
UpperCAmelCase__ : Union[str, Any] = self.out_of_vocab.sub('' , UpperCamelCase_ ), [], []
return artists, genres, lyrics
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : List[str] = unicodedata.normalize('NFD' , UpperCamelCase_ )
UpperCAmelCase__ : List[str] = []
for char in text:
UpperCAmelCase__ : str = unicodedata.category(UpperCamelCase_ )
if cat == "Mn":
continue
output.append(UpperCamelCase_ )
return "".join(UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : Any = (
[chr(UpperCamelCase_ ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(UpperCamelCase_ ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(UpperCamelCase_ ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
UpperCAmelCase__ : Optional[Any] = frozenset(UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = re.compile(R'_+' )
UpperCAmelCase__ : Tuple = ''.join([c if c in accepted else '_' for c in text.lower()] )
UpperCAmelCase__ : Any = pattern.sub('_' , UpperCamelCase_ ).strip('_' )
return text
def __snake_case ( self , UpperCamelCase_ ):
return " ".join(UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
# Convert to TensorType
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ : Tuple = TensorType(UpperCamelCase_ )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
UpperCAmelCase__ : Tuple = tf.constant
UpperCAmelCase__ : Union[str, Any] = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
UpperCAmelCase__ : List[str] = torch.tensor
UpperCAmelCase__ : Optional[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
UpperCAmelCase__ : Any = jnp.array
UpperCAmelCase__ : Dict = _is_jax
else:
UpperCAmelCase__ : Union[str, Any] = np.asarray
UpperCAmelCase__ : List[Any] = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
UpperCAmelCase__ : Optional[int] = [inputs]
if not is_tensor(UpperCamelCase_ ):
UpperCAmelCase__ : Optional[int] = as_tensor(UpperCamelCase_ )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="" , UpperCamelCase_="pt" ):
UpperCAmelCase__ : Tuple = [0, 0, 0]
UpperCAmelCase__ : Tuple = [artist] * len(self.version )
UpperCAmelCase__ : Tuple = [genres] * len(self.version )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = self.tokenize(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self._convert_token_to_id(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase__ : Any = [-INFINITY] * len(full_tokens[-1] )
UpperCAmelCase__ : Dict = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCamelCase_ )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase__ : int = os.path.join(
UpperCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(UpperCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCamelCase_ ) )
UpperCAmelCase__ : Optional[Any] = os.path.join(
UpperCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(UpperCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCamelCase_ ) )
UpperCAmelCase__ : List[Any] = os.path.join(
UpperCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(UpperCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCamelCase_ ) )
return (artists_file, genres_file, lyrics_file)
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ : Tuple = self.artists_decoder.get(UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = [self.genres_decoder.get(UpperCamelCase_ ) for genre in genres_index]
UpperCAmelCase__ : Dict = [self.lyrics_decoder.get(UpperCamelCase_ ) for character in lyric_index]
return artist, genres, lyrics
| 254 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class a ( lowercase ):
UpperCamelCase : List[Any] = """deit"""
def __init__( self , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3_072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.02 , UpperCamelCase_=1E-12 , UpperCamelCase_=224 , UpperCamelCase_=16 , UpperCamelCase_=3 , UpperCamelCase_=True , UpperCamelCase_=16 , **UpperCamelCase_ , ):
super().__init__(**UpperCamelCase_ )
UpperCAmelCase__ : int = hidden_size
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : int = intermediate_size
UpperCAmelCase__ : Optional[int] = hidden_act
UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Dict = initializer_range
UpperCAmelCase__ : Optional[Any] = layer_norm_eps
UpperCAmelCase__ : Optional[int] = image_size
UpperCAmelCase__ : Optional[int] = patch_size
UpperCAmelCase__ : Union[str, Any] = num_channels
UpperCAmelCase__ : Optional[Any] = qkv_bias
UpperCAmelCase__ : Dict = encoder_stride
class a ( lowercase ):
UpperCamelCase : Optional[int] = version.parse("""1.11""" )
@property
def __snake_case ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __snake_case ( self ):
return 1E-4
| 254 | 1 |
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
return sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[column] )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=float('''inf''' ) ):
'''simple docstring'''
for i in range(points_counts - 1 ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Any = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCamelCase :Any = current_dis
return min_dis
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=float('''inf''' ) ):
'''simple docstring'''
for i in range(min(6 , points_counts - 1 ) , SCREAMING_SNAKE_CASE ):
for j in range(max(0 , i - 6 ) , SCREAMING_SNAKE_CASE ):
__UpperCamelCase :int = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCamelCase :str = current_dis
return min_dis
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if points_counts <= 3:
return dis_between_closest_pair(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# recursion
__UpperCamelCase :List[Any] = points_counts // 2
__UpperCamelCase :Tuple = closest_pair_of_points_sqr(
SCREAMING_SNAKE_CASE , points_sorted_on_y[:mid] , SCREAMING_SNAKE_CASE )
__UpperCamelCase :int = closest_pair_of_points_sqr(
SCREAMING_SNAKE_CASE , points_sorted_on_y[mid:] , points_counts - mid )
__UpperCamelCase :List[str] = min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCamelCase :Any = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Dict = dis_between_closest_in_strip(
SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
return min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Union[str, Any] = column_based_sort(SCREAMING_SNAKE_CASE , column=0 )
__UpperCamelCase :Union[str, Any] = column_based_sort(SCREAMING_SNAKE_CASE , column=1 )
return (
closest_pair_of_points_sqr(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
) ** 0.5
if __name__ == "__main__":
__lowercase = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
| 167 | import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if (
(cp >= 0X4_e_0_0 and cp <= 0X9_f_f_f)
or (cp >= 0X3_4_0_0 and cp <= 0X4_d_b_f) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_a_6_d_f) #
or (cp >= 0X2_a_7_0_0 and cp <= 0X2_b_7_3_f) #
or (cp >= 0X2_b_7_4_0 and cp <= 0X2_b_8_1_f) #
or (cp >= 0X2_b_8_2_0 and cp <= 0X2_c_e_a_f) #
or (cp >= 0Xf_9_0_0 and cp <= 0Xf_a_f_f)
or (cp >= 0X2_f_8_0_0 and cp <= 0X2_f_a_1_f) #
): #
return True
return False
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
for char in word:
__UpperCamelCase :str = ord(SCREAMING_SNAKE_CASE )
if not _is_chinese_char(SCREAMING_SNAKE_CASE ):
return 0
return 1
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[Any] = set()
for token in tokens:
__UpperCamelCase :Tuple = len(SCREAMING_SNAKE_CASE ) > 1 and is_chinese(SCREAMING_SNAKE_CASE )
if chinese_word:
word_set.add(SCREAMING_SNAKE_CASE )
__UpperCamelCase :int = list(SCREAMING_SNAKE_CASE )
return word_list
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
__UpperCamelCase :Dict = max([len(SCREAMING_SNAKE_CASE ) for w in chinese_word_set] )
__UpperCamelCase :str = bert_tokens
__UpperCamelCase , __UpperCamelCase :int = 0, len(SCREAMING_SNAKE_CASE )
while start < end:
__UpperCamelCase :Optional[int] = True
if is_chinese(bert_word[start] ):
__UpperCamelCase :Dict = min(end - start , SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE , 1 , -1 ):
__UpperCamelCase :int = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
__UpperCamelCase :Union[str, Any] = '''##''' + bert_word[j]
__UpperCamelCase :Dict = start + i
__UpperCamelCase :Dict = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Tuple = []
for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 100 ):
__UpperCamelCase :List[str] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
__UpperCamelCase :int = [get_chinese_word(SCREAMING_SNAKE_CASE ) for r in res]
ltp_res.extend(SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Any = []
for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 100 ):
__UpperCamelCase :List[str] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[Any] = []
for input_ids, chinese_word in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCamelCase :str = []
for id in input_ids:
__UpperCamelCase :Dict = bert_tokenizer._convert_id_to_token(SCREAMING_SNAKE_CASE )
input_tokens.append(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Dict = add_sub_symbol(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCamelCase :Optional[int] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(SCREAMING_SNAKE_CASE ):
if token[:2] == "##":
__UpperCamelCase :Union[str, Any] = token[2:]
# save chinese tokens' pos
if len(SCREAMING_SNAKE_CASE ) == 1 and _is_chinese_char(ord(SCREAMING_SNAKE_CASE ) ):
ref_id.append(SCREAMING_SNAKE_CASE )
ref_ids.append(SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
return ref_ids
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
__UpperCamelCase :Any = f.readlines()
__UpperCamelCase :str = [line.strip() for line in data if len(SCREAMING_SNAKE_CASE ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
__UpperCamelCase :Optional[Any] = LTP(args.ltp ) # faster in GPU device
__UpperCamelCase :Union[str, Any] = BertTokenizer.from_pretrained(args.bert )
__UpperCamelCase :Any = prepare_ref(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
__UpperCamelCase :Optional[Any] = [json.dumps(SCREAMING_SNAKE_CASE ) + '''\n''' for ref in ref_ids]
f.writelines(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
__lowercase = parser.parse_args()
main(args)
| 167 | 1 |
from typing import Dict
from .base import GenericTensor, Pipeline
class lowerCAmelCase_ ( snake_case__ ):
def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : List[str]=None , **SCREAMING_SNAKE_CASE_ : Any ):
if tokenize_kwargs is None:
lowerCAmelCase__ = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' )
lowerCAmelCase__ = truncation
lowerCAmelCase__ = tokenize_kwargs
lowerCAmelCase__ = {}
if return_tensors is not None:
lowerCAmelCase__ = return_tensors
return preprocess_params, {}, postprocess_params
def __snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCAmelCase__ = self.framework
lowerCAmelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
return model_inputs
def __snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int ):
lowerCAmelCase__ = self.model(**SCREAMING_SNAKE_CASE_ )
return model_outputs
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int=False ):
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Any ):
return super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 288 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : int = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class lowerCAmelCase_ ( snake_case__ ):
UpperCamelCase_ :int = 'codegen'
UpperCamelCase_ :int = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str=50_400 , SCREAMING_SNAKE_CASE_ : str=2_048 , SCREAMING_SNAKE_CASE_ : int=2_048 , SCREAMING_SNAKE_CASE_ : Any=4_096 , SCREAMING_SNAKE_CASE_ : List[Any]=28 , SCREAMING_SNAKE_CASE_ : str=16 , SCREAMING_SNAKE_CASE_ : str=64 , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Dict="gelu_new" , SCREAMING_SNAKE_CASE_ : str=0.0 , SCREAMING_SNAKE_CASE_ : Any=0.0 , SCREAMING_SNAKE_CASE_ : Any=0.0 , SCREAMING_SNAKE_CASE_ : Any=1e-5 , SCREAMING_SNAKE_CASE_ : Any=0.02 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : Optional[int]=50_256 , SCREAMING_SNAKE_CASE_ : Any=50_256 , SCREAMING_SNAKE_CASE_ : List[str]=False , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = n_ctx
lowerCAmelCase__ = n_positions
lowerCAmelCase__ = n_embd
lowerCAmelCase__ = n_layer
lowerCAmelCase__ = n_head
lowerCAmelCase__ = n_inner
lowerCAmelCase__ = rotary_dim
lowerCAmelCase__ = activation_function
lowerCAmelCase__ = resid_pdrop
lowerCAmelCase__ = embd_pdrop
lowerCAmelCase__ = attn_pdrop
lowerCAmelCase__ = layer_norm_epsilon
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = use_cache
lowerCAmelCase__ = bos_token_id
lowerCAmelCase__ = eos_token_id
super().__init__(
bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , tie_word_embeddings=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase_ ( snake_case__ ):
def __init__( self : str , SCREAMING_SNAKE_CASE_ : PretrainedConfig , SCREAMING_SNAKE_CASE_ : str = "default" , SCREAMING_SNAKE_CASE_ : List[PatchingSpec] = None , SCREAMING_SNAKE_CASE_ : bool = False , ):
super().__init__(SCREAMING_SNAKE_CASE_ , task=SCREAMING_SNAKE_CASE_ , patching_specs=SCREAMING_SNAKE_CASE_ , use_past=SCREAMING_SNAKE_CASE_ )
if not getattr(self._config , '''pad_token_id''' , SCREAMING_SNAKE_CASE_ ):
# TODO: how to do that better?
lowerCAmelCase__ = 0
@property
def __snake_case ( self : str ):
lowerCAmelCase__ = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction='''inputs''' )
lowerCAmelCase__ = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
lowerCAmelCase__ = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __snake_case ( self : Dict ):
return self._config.n_layer
@property
def __snake_case ( self : Union[str, Any] ):
return self._config.n_head
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[TensorType] = None , ):
lowerCAmelCase__ = super(SCREAMING_SNAKE_CASE_ , self ).generate_dummy_inputs(
SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , is_pair=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ )
# We need to order the input in the way they appears in the forward()
lowerCAmelCase__ = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCAmelCase__ , lowerCAmelCase__ = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowerCAmelCase__ = seqlen + 2
lowerCAmelCase__ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCAmelCase__ = [
(torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ )) for _ in range(self.num_layers )
]
lowerCAmelCase__ = common_inputs['''attention_mask''']
if self.use_past:
lowerCAmelCase__ = ordered_inputs['''attention_mask'''].dtype
lowerCAmelCase__ = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )] , dim=1 )
return ordered_inputs
@property
def __snake_case ( self : Optional[int] ):
return 13
| 288 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCamelCase( unittest.TestCase ):
def __init__( self, lowerCamelCase, lowerCamelCase=7, lowerCamelCase=3, lowerCamelCase=18, lowerCamelCase=30, lowerCamelCase=4_00, lowerCamelCase=True, lowerCamelCase=None, lowerCamelCase=True, ) -> Optional[Any]:
"""simple docstring"""
_lowercase : Tuple = size if size is not None else {'height': 18, 'width': 18}
_lowercase : Optional[int] = parent
_lowercase : Optional[Any] = batch_size
_lowercase : Any = num_channels
_lowercase : Optional[int] = image_size
_lowercase : List[Any] = min_resolution
_lowercase : Optional[int] = max_resolution
_lowercase : Optional[int] = do_resize
_lowercase : str = size
_lowercase : Optional[int] = apply_ocr
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : Optional[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = LayoutLMvaImageProcessingTester(self)
@property
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Tuple = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCamelCase, 'do_resize'))
self.assertTrue(hasattr(lowerCamelCase, 'size'))
self.assertTrue(hasattr(lowerCamelCase, 'apply_ocr'))
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {'height': 18, 'width': 18})
_lowercase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowercase : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase)
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image)
# Test not batched input
_lowercase : Dict = image_processing(image_inputs[0], return_tensors='pt')
self.assertEqual(
encoding.pixel_values.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
self.assertIsInstance(encoding.words, lowerCamelCase)
self.assertIsInstance(encoding.boxes, lowerCamelCase)
# Test batched
_lowercase : Optional[Any] = image_processing(lowerCamelCase, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_lowercase : Dict = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase)
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray)
# Test not batched input
_lowercase : Union[str, Any] = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
# Test batched
_lowercase : Tuple = image_processing(lowerCamelCase, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Dict = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_lowercase : Any = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase)
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor)
# Test not batched input
_lowercase : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
# Test batched
_lowercase : Union[str, Any] = image_processing(lowerCamelCase, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
), )
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : int = LayoutLMvaImageProcessor()
from datasets import load_dataset
_lowercase : List[str] = load_dataset('hf-internal-testing/fixtures_docvqa', split='test')
_lowercase : Tuple = Image.open(ds[0]['file']).convert('RGB')
_lowercase : Dict = image_processing(lowerCamelCase, return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24))
self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_lowercase : Union[str, Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
_lowercase : Optional[int] = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words, lowerCamelCase)
self.assertListEqual(encoding.boxes, lowerCamelCase)
# with apply_OCR = False
_lowercase : int = LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase)
_lowercase : int = image_processing(lowerCamelCase, return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24))
| 89 | from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
lowercase = ["""text""", """image""", """audio"""]
def lowerCamelCase_ ( UpperCamelCase__ : List[str] ):
'''simple docstring'''
UpperCamelCase__ = []
for input_type in input_types:
if input_type == "text":
inputs.append('''Text input''' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(UpperCamelCase__, UpperCamelCase__ ):
inputs.append(create_inputs(UpperCamelCase__ ) )
else:
raise ValueError(F"""Invalid type requested: {input_type}""" )
return inputs
def lowerCamelCase_ ( UpperCamelCase__ : List ):
'''simple docstring'''
UpperCamelCase__ = []
for output in outputs:
if isinstance(UpperCamelCase__, (str, AgentText) ):
output_types.append('''text''' )
elif isinstance(UpperCamelCase__, (Image.Image, AgentImage) ):
output_types.append('''image''' )
elif isinstance(UpperCamelCase__, (torch.Tensor, AgentAudio) ):
output_types.append('''audio''' )
else:
raise ValueError(F"""Invalid output: {output}""" )
return output_types
@is_tool_test
class __lowercase :
'''simple docstring'''
def A_ ( self : List[str] ):
self.assertTrue(hasattr(self.tool , '''inputs''' ) )
self.assertTrue(hasattr(self.tool , '''outputs''' ) )
UpperCamelCase__ = self.tool.inputs
for _input in inputs:
if isinstance(_input , _a ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
UpperCamelCase__ = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def A_ ( self : str ):
UpperCamelCase__ = create_inputs(self.tool.inputs )
UpperCamelCase__ = self.tool(*_a )
# There is a single output
if len(self.tool.outputs ) == 1:
UpperCamelCase__ = [outputs]
self.assertListEqual(output_types(_a ) , self.tool.outputs )
def A_ ( self : List[str] ):
self.assertTrue(hasattr(self.tool , '''description''' ) )
self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )
def A_ ( self : List[str] ):
UpperCamelCase__ = create_inputs(self.tool.inputs )
UpperCamelCase__ = self.tool(*_a )
if not isinstance(_a , _a ):
UpperCamelCase__ = [outputs]
self.assertEqual(len(_a ) , len(self.tool.outputs ) )
for output, output_type in zip(_a , self.tool.outputs ):
UpperCamelCase__ = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(_a , _a ) )
def A_ ( self : Optional[int] ):
UpperCamelCase__ = create_inputs(self.tool.inputs )
UpperCamelCase__ = []
for _input, input_type in zip(_a , self.tool.inputs ):
if isinstance(_a , _a ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
UpperCamelCase__ = self.tool(*_a )
if not isinstance(_a , _a ):
UpperCamelCase__ = [outputs]
self.assertEqual(len(_a ) , len(self.tool.outputs ) )
| 240 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase_ ( _lowercase ):
"""simple docstring"""
UpperCAmelCase__ = ["image_processor", "tokenizer"]
UpperCAmelCase__ = "Pix2StructImageProcessor"
UpperCAmelCase__ = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
__UpperCamelCase = False
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 2_048 , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None and not self.image_processor.is_vqa:
__UpperCamelCase = self.tokenizer
__UpperCamelCase = self.tokenizer(
text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_length=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
__UpperCamelCase = self.image_processor(
_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , max_patches=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
else:
# add pixel_values and bbox
__UpperCamelCase = self.image_processor(
_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , max_patches=_SCREAMING_SNAKE_CASE , header_text=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None and not self.image_processor.is_vqa:
__UpperCamelCase = self.tokenizer(
text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_length=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
if "attention_mask" in text_encoding:
__UpperCamelCase = text_encoding.pop('attention_mask' )
if "input_ids" in text_encoding:
__UpperCamelCase = text_encoding.pop('input_ids' )
else:
__UpperCamelCase = None
if text_encoding is not None:
encoding_image_processor.update(_SCREAMING_SNAKE_CASE )
return encoding_image_processor
def __lowercase( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str:
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowercase( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def __lowercase( self ) -> Optional[Any]:
__UpperCamelCase = self.tokenizer.model_input_names
__UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 567 |
def _a ( __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
"""simple docstring"""
__UpperCamelCase = [False] * len(__lowercase )
__UpperCamelCase = []
queue.append(__lowercase )
__UpperCamelCase = True
while queue:
__UpperCamelCase = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__lowercase )
__UpperCamelCase = True
__UpperCamelCase = u
return visited[t]
def _a ( __lowercase , __lowercase , __lowercase ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = [-1] * (len(__lowercase ))
__UpperCamelCase = 0
while bfs(__lowercase , __lowercase , __lowercase , __lowercase ):
__UpperCamelCase = float('Inf' )
__UpperCamelCase = sink
while s != source:
# Find the minimum value in select path
__UpperCamelCase = min(__lowercase , graph[parent[s]][s] )
__UpperCamelCase = parent[s]
max_flow += path_flow
__UpperCamelCase = sink
while v != source:
__UpperCamelCase = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__UpperCamelCase = parent[v]
return max_flow
_snake_case = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_snake_case , _snake_case = 0, 5
print(ford_fulkerson(graph, source, sink))
| 567 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE( __UpperCamelCase ) -> Tuple:
a__ : List[str] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
a__ : List[str] = [1_44, 1_92, 2_40]
a__ : Dict = [16, 32, 64, 96, 1_28, 1_60, 6_40]
elif "mobilevit_xs" in mobilevit_name:
a__ : Optional[int] = [96, 1_20, 1_44]
a__ : List[str] = [16, 32, 48, 64, 80, 96, 3_84]
elif "mobilevit_xxs" in mobilevit_name:
a__ : Dict = [64, 80, 96]
a__ : List[str] = [16, 16, 24, 48, 64, 80, 3_20]
a__ : Optional[int] = 0.0_5
a__ : Tuple = 2.0
if mobilevit_name.startswith("deeplabv3_" ):
a__ : str = 5_12
a__ : Union[str, Any] = 16
a__ : Dict = 21
a__ : Any = "pascal-voc-id2label.json"
else:
a__ : Any = 10_00
a__ : Any = "imagenet-1k-id2label.json"
a__ : Dict = "huggingface/label-files"
a__ : Any = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type="dataset" ) , "r" ) )
a__ : List[str] = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
a__ : Optional[int] = idalabel
a__ : Tuple = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase=False ) -> Any:
for i in range(1 , 6 ):
if F'layer_{i}.' in name:
a__ : Any = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.' )
if "conv_1." in name:
a__ : Optional[Any] = name.replace("conv_1." , "conv_stem." )
if ".block." in name:
a__ : Optional[int] = name.replace(".block." , "." )
if "exp_1x1" in name:
a__ : List[str] = name.replace("exp_1x1" , "expand_1x1" )
if "red_1x1" in name:
a__ : Optional[int] = name.replace("red_1x1" , "reduce_1x1" )
if ".local_rep.conv_3x3." in name:
a__ : Tuple = name.replace(".local_rep.conv_3x3." , ".conv_kxk." )
if ".local_rep.conv_1x1." in name:
a__ : int = name.replace(".local_rep.conv_1x1." , ".conv_1x1." )
if ".norm." in name:
a__ : Union[str, Any] = name.replace(".norm." , ".normalization." )
if ".conv." in name:
a__ : Optional[int] = name.replace(".conv." , ".convolution." )
if ".conv_proj." in name:
a__ : str = name.replace(".conv_proj." , ".conv_projection." )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F'.{i}.{j}.' in name:
a__ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.' )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F'.{i}.{j}.' in name:
a__ : Union[str, Any] = name.replace(F'.{i}.{j}.' , F'.{i}.' )
if "expand_1x1" in name:
a__ : str = name.replace("expand_1x1" , "downsampling_layer.expand_1x1" )
if "conv_3x3" in name:
a__ : Any = name.replace("conv_3x3" , "downsampling_layer.conv_3x3" )
if "reduce_1x1" in name:
a__ : List[Any] = name.replace("reduce_1x1" , "downsampling_layer.reduce_1x1" )
for i in range(2 , 5 ):
if F'.global_rep.{i}.weight' in name:
a__ : str = name.replace(F'.global_rep.{i}.weight' , ".layernorm.weight" )
if F'.global_rep.{i}.bias' in name:
a__ : Tuple = name.replace(F'.global_rep.{i}.bias' , ".layernorm.bias" )
if ".global_rep." in name:
a__ : Dict = name.replace(".global_rep." , ".transformer." )
if ".pre_norm_mha.0." in name:
a__ : Any = name.replace(".pre_norm_mha.0." , ".layernorm_before." )
if ".pre_norm_mha.1.out_proj." in name:
a__ : Optional[int] = name.replace(".pre_norm_mha.1.out_proj." , ".attention.output.dense." )
if ".pre_norm_ffn.0." in name:
a__ : int = name.replace(".pre_norm_ffn.0." , ".layernorm_after." )
if ".pre_norm_ffn.1." in name:
a__ : List[str] = name.replace(".pre_norm_ffn.1." , ".intermediate.dense." )
if ".pre_norm_ffn.4." in name:
a__ : Dict = name.replace(".pre_norm_ffn.4." , ".output.dense." )
if ".transformer." in name:
a__ : Union[str, Any] = name.replace(".transformer." , ".transformer.layer." )
if ".aspp_layer." in name:
a__ : Any = name.replace(".aspp_layer." , "." )
if ".aspp_pool." in name:
a__ : Union[str, Any] = name.replace(".aspp_pool." , "." )
if "seg_head." in name:
a__ : Any = name.replace("seg_head." , "segmentation_head." )
if "segmentation_head.classifier.classifier." in name:
a__ : Tuple = name.replace("segmentation_head.classifier.classifier." , "segmentation_head.classifier." )
if "classifier.fc." in name:
a__ : Tuple = name.replace("classifier.fc." , "classifier." )
elif (not base_model) and ("segmentation_head." not in name):
a__ : Dict = "mobilevit." + name
return name
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> str:
if base_model:
a__ : int = ""
else:
a__ : Dict = "mobilevit."
for key in orig_state_dict.copy().keys():
a__ : List[Any] = orig_state_dict.pop(__UpperCamelCase )
if key[:8] == "encoder.":
a__ : Any = key[8:]
if "qkv" in key:
a__ : str = key.split("." )
a__ : Union[str, Any] = int(key_split[0][6:] ) - 1
a__ : Optional[int] = int(key_split[3] )
a__ : int = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}' )
a__ : List[Any] = layer.transformer.layer[transformer_num].attention.attention.all_head_size
a__ : Tuple = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
a__ : Any = val[:dim, :]
a__ : int = val[dim : dim * 2, :]
a__ : Union[str, Any] = val[-dim:, :]
else:
a__ : Optional[int] = val[:dim]
a__ : Optional[Any] = val[dim : dim * 2]
a__ : int = val[-dim:]
else:
a__ : List[str] = val
return orig_state_dict
def SCREAMING_SNAKE_CASE( ) -> List[str]:
a__ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
a__ : Dict = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> Tuple:
a__ : Dict = get_mobilevit_config(__UpperCamelCase )
# load original state_dict
a__ : Tuple = torch.load(__UpperCamelCase , map_location="cpu" )
# load 🤗 model
if mobilevit_name.startswith("deeplabv3_" ):
a__ : List[Any] = MobileViTForSemanticSegmentation(__UpperCamelCase ).eval()
else:
a__ : Tuple = MobileViTForImageClassification(__UpperCamelCase ).eval()
a__ : Any = convert_state_dict(__UpperCamelCase , __UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
a__ : Union[str, Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
a__ : Optional[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
a__ : Tuple = model(**__UpperCamelCase )
a__ : Dict = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
a__ : str = torch.tensor(
[
[[6.2_0_6_5, 6.1_2_9_2, 6.2_0_7_0], [6.1_0_7_9, 6.1_2_5_4, 6.1_7_4_7], [6.0_0_4_2, 6.1_0_7_1, 6.1_0_3_4]],
[[-6.9_2_5_3, -6.8_6_5_3, -7.0_3_9_8], [-7.3_2_1_8, -7.3_9_8_3, -7.3_6_7_0], [-7.1_9_6_1, -7.2_4_8_2, -7.1_5_6_9]],
[[-4.4_7_2_3, -4.4_3_4_8, -4.3_7_6_9], [-5.3_6_2_9, -5.4_6_3_2, -5.4_5_9_8], [-5.1_5_8_7, -5.3_4_0_2, -5.5_0_5_9]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
a__ : Optional[int] = torch.tensor(
[
[[5.4_4_4_9, 5.5_7_3_3, 5.6_3_1_4], [5.1_8_1_5, 5.3_9_3_0, 5.5_9_6_3], [5.1_6_5_6, 5.4_3_3_3, 5.4_8_5_3]],
[[-9.4_4_2_3, -9.7_7_6_6, -9.6_7_1_4], [-9.1_5_8_1, -9.5_7_2_0, -9.5_5_1_9], [-9.1_0_0_6, -9.6_4_5_8, -9.5_7_0_3]],
[[-7.7_7_2_1, -7.3_7_1_6, -7.1_5_8_3], [-8.4_5_9_9, -8.0_6_2_4, -7.7_9_4_4], [-8.4_1_7_2, -7.8_3_6_6, -7.5_0_2_5]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
a__ : Tuple = torch.tensor(
[
[[6.9_8_1_1, 6.9_7_4_3, 7.3_1_2_3], [7.1_7_7_7, 7.1_9_3_1, 7.3_9_3_8], [7.5_6_3_3, 7.8_0_5_0, 7.8_9_0_1]],
[[-1_0.5_5_3_6, -1_0.2_3_3_2, -1_0.2_9_2_4], [-1_0.2_3_3_6, -9.8_6_2_4, -9.5_9_6_4], [-1_0.8_8_4_0, -1_0.8_1_5_8, -1_0.6_6_5_9]],
[[-3.4_9_3_8, -3.0_6_3_1, -2.8_6_2_0], [-3.4_2_0_5, -2.8_1_3_5, -2.6_8_7_5], [-3.4_1_7_9, -2.7_9_4_5, -2.8_7_5_0]],
] )
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' )
assert torch.allclose(logits[0, :3, :3, :3] , __UpperCamelCase , atol=1e-4 )
else:
assert logits.shape == (1, 10_00)
if mobilevit_name == "mobilevit_s":
a__ : List[Any] = torch.tensor([-0.9_8_6_6, 0.2_3_9_2, -1.1_2_4_1] )
elif mobilevit_name == "mobilevit_xs":
a__ : List[str] = torch.tensor([-2.4_7_6_1, -0.9_3_9_9, -1.9_5_8_7] )
elif mobilevit_name == "mobilevit_xxs":
a__ : Tuple = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] )
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' )
assert torch.allclose(logits[0, :3] , __UpperCamelCase , atol=1e-4 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCamelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
a__ : Tuple = {
"mobilevit_s": "mobilevit-small",
"mobilevit_xs": "mobilevit-x-small",
"mobilevit_xxs": "mobilevit-xx-small",
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
}
print("Pushing to the hub..." )
a__ : Any = model_mapping[mobilevit_name]
image_processor.push_to_hub(__UpperCamelCase , organization="apple" )
model.push_to_hub(__UpperCamelCase , organization="apple" )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 191 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A :int = ["image_processor", "tokenizer"]
A :Any = "LayoutLMv3ImageProcessor"
A :str = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
"""simple docstring"""
a__ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
a__ : List[str] = kwargs.pop("feature_extractor" )
a__ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
# first, apply the image processor
a__ : List[str] = self.image_processor(images=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
a__ : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
a__ : Optional[Any] = features["words"]
a__ : Union[str, Any] = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , )
# add pixel values
a__ : Dict = features.pop("pixel_values" )
if return_overflowing_tokens is True:
a__ : str = self.get_overflowing_images(__UpperCAmelCase , encoded_inputs["overflow_to_sample_mapping"] )
a__ : Tuple = images
return encoded_inputs
def _A ( self , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
a__ : Any = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
f' {len(__UpperCAmelCase )} and {len(__UpperCAmelCase )}' )
return images_with_overflow
def _A ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def _A ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def _A ( self ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _A ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCAmelCase , )
return self.image_processor_class
@property
def _A ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCAmelCase , )
return self.image_processor
| 191 | 1 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'''vocab_file''': '''vocab.json''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
'''merges_file''': '''merges.txt''',
}
UpperCAmelCase_ = {
'''vocab_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
),
},
'''tokenizer_config_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
),
},
'''merges_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
),
},
}
UpperCAmelCase_ = '''</w>'''
UpperCAmelCase_ = '''@@ '''
def lowerCAmelCase_ ( lowercase: List[str] ) -> Any:
'''simple docstring'''
_UpperCamelCase: Optional[int] = set()
_UpperCamelCase: Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCamelCase: Optional[Any] = char
return pairs
# Speech2Text2 has no max input length
UpperCAmelCase_ = {'''facebook/s2t-wav2vec2-large-en-de''': 1_0_2_4}
class __magic_name__ ( lowercase_ ):
"""simple docstring"""
lowerCAmelCase : str = VOCAB_FILES_NAMES
lowerCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : Tuple = ['''input_ids''', '''attention_mask''']
def __init__( self : str , _lowercase : Dict , _lowercase : List[Any]="<s>" , _lowercase : Optional[int]="<pad>" , _lowercase : Any="</s>" , _lowercase : str="<unk>" , _lowercase : Dict=False , _lowercase : Dict=None , **_lowercase : str , ):
"""simple docstring"""
super().__init__(
unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , **lowerCamelCase_ , )
_UpperCamelCase: Union[str, Any] = do_lower_case
with open(lowerCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
_UpperCamelCase: Optional[int] = json.load(lowerCamelCase_ )
_UpperCamelCase: int = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
_UpperCamelCase: Any = None
_UpperCamelCase: Optional[Any] = None
else:
with open(lowerCamelCase_ , encoding='''utf-8''' ) as merges_handle:
_UpperCamelCase: Optional[int] = merges_handle.read().split('''\n''' )[:-1]
_UpperCamelCase: int = [tuple(merge.split()[:2] ) for merge in merges]
_UpperCamelCase: str = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
_UpperCamelCase: List[str] = {}
@property
def lowerCAmelCase ( self : int ):
"""simple docstring"""
return len(self.decoder )
def lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : List[Any] , _lowercase : int ):
"""simple docstring"""
_UpperCamelCase: str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_UpperCamelCase: Dict = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
_UpperCamelCase: Optional[int] = min(lowerCamelCase_ , key=lambda _lowercase : self.bpe_ranks.get(lowerCamelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_UpperCamelCase: List[Any] = bigram
_UpperCamelCase: Any = []
_UpperCamelCase: Optional[int] = 0
while i < len(lowerCamelCase_ ):
try:
_UpperCamelCase: List[str] = word.index(lowerCamelCase_ , lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_UpperCamelCase: Union[str, Any] = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_UpperCamelCase: List[Any] = tuple(lowerCamelCase_ )
_UpperCamelCase: Optional[int] = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
_UpperCamelCase: Any = get_pairs(lowerCamelCase_ )
_UpperCamelCase: Dict = """ """.join(lowerCamelCase_ )
if word == "\n " + BPE_TOKEN_MERGES:
_UpperCamelCase: List[str] = """\n""" + BPE_TOKEN_MERGES
if word.endswith(lowerCamelCase_ ):
_UpperCamelCase: Tuple = word.replace(lowerCamelCase_ , '''''' )
_UpperCamelCase: Optional[Any] = word.replace(''' ''' , lowerCamelCase_ )
_UpperCamelCase: int = word
return word
def lowerCAmelCase ( self : Tuple , _lowercase : Any ):
"""simple docstring"""
if self.bpe_ranks is None:
raise ValueError(
'''This tokenizer was instantiated without a `merges.txt` file, so'''
''' that it can only be used for decoding, not for encoding.'''
'''Make sure to provide `merges.txt` file at instantiation to enable '''
'''encoding.''' )
if self.do_lower_case:
_UpperCamelCase: Any = text.lower()
_UpperCamelCase: Any = text.split()
_UpperCamelCase: Optional[int] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(lowerCamelCase_ ).split(''' ''' ) ) )
return split_tokens
def lowerCAmelCase ( self : int , _lowercase : str ):
"""simple docstring"""
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : Optional[int] , _lowercase : int ):
"""simple docstring"""
_UpperCamelCase: List[str] = self.decoder.get(lowerCamelCase_ , self.unk_token )
return result
def lowerCAmelCase ( self : Tuple , _lowercase : List[str] ):
"""simple docstring"""
_UpperCamelCase: int = """ """.join(lowerCamelCase_ )
# make sure @@ tokens are concatenated
_UpperCamelCase: Optional[int] = """""".join(string.split(lowerCamelCase_ ) )
return string
def lowerCAmelCase ( self : Tuple , _lowercase : str , _lowercase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCamelCase: List[str] = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCamelCase: int = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + '''\n''' )
_UpperCamelCase: Union[str, Any] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowercase : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
_UpperCamelCase: Any = token_index
writer.write(''' '''.join(lowerCamelCase_ ) + '''\n''' )
index += 1
return (vocab_file, merges_file) | 713 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class __magic_name__ ( __a ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = '''philschmid/bart-large-cnn-samsum'''
lowerCAmelCase : Any = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
lowerCAmelCase : Any = '''summarizer'''
lowerCAmelCase : Tuple = AutoTokenizer
lowerCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM
lowerCAmelCase : Union[str, Any] = ['''text''']
lowerCAmelCase : Dict = ['''text''']
def lowerCAmelCase ( self : str , _lowercase : Union[str, Any] ):
"""simple docstring"""
return self.pre_processor(_lowercase , return_tensors='''pt''' , truncation=_lowercase )
def lowerCAmelCase ( self : List[Any] , _lowercase : Optional[Any] ):
"""simple docstring"""
return self.model.generate(**_lowercase )[0]
def lowerCAmelCase ( self : int , _lowercase : List[str] ):
"""simple docstring"""
return self.pre_processor.decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase ) | 264 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def a__ (__lowercase :Union[str, Any] , __lowercase :Optional[Any] , __lowercase :Any , __lowercase :Tuple , __lowercase :Any ) -> float:
_A : Tuple = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__lowerCamelCase )] )
_A : Optional[int] = np.array(__lowerCamelCase )
_A : Union[str, Any] = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __lowerCamelCase ) ) , x.transpose() ) , __lowerCamelCase )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def a__ (__lowercase :Dict , __lowercase :Any , __lowercase :Optional[int] ) -> float:
_A : List[Any] = (1, 2, 1)
_A : str = (1, 1, 0, 7)
_A : str = SARIMAX(
__lowerCamelCase , exog=__lowerCamelCase , order=__lowerCamelCase , seasonal_order=__lowerCamelCase )
_A : Tuple = model.fit(disp=__lowerCamelCase , maxiter=600 , method='''nm''' )
_A : Any = model_fit.predict(1 , len(__lowerCamelCase ) , exog=[test_match] )
return result[0]
def a__ (__lowercase :Union[str, Any] , __lowercase :Optional[Any] , __lowercase :Any ) -> float:
_A : str = SVR(kernel='''rbf''' , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(__lowerCamelCase , __lowerCamelCase )
_A : str = regressor.predict(__lowerCamelCase )
return y_pred[0]
def a__ (__lowercase :str ) -> float:
train_user.sort()
_A : str = np.percentile(__lowerCamelCase , 25 )
_A : int = np.percentile(__lowerCamelCase , 75 )
_A : str = qa - qa
_A : Optional[int] = qa - (iqr * 0.1)
return low_lim
def a__ (__lowercase :Optional[int] , __lowercase :List[Any] ) -> bool:
_A : Any = 0
_A : Dict = 0
for i in list_vote:
if i > actual_result:
_A : Dict = not_safe + 1
else:
if abs(abs(__lowerCamelCase ) - abs(__lowerCamelCase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
_UpperCamelCase : Optional[int] =[[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
_UpperCamelCase : Tuple =pd.DataFrame(
data_input, columns=['total_user', 'total_even', 'days']
)
_UpperCamelCase : Optional[Any] =Normalizer().fit_transform(data_input_df.values)
# split data
_UpperCamelCase : Optional[int] =normalize_df[:, 2].tolist()
_UpperCamelCase : Union[str, Any] =normalize_df[:, 0].tolist()
_UpperCamelCase : Optional[int] =normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
_UpperCamelCase : Tuple =normalize_df[:, [1, 2]].tolist()
_UpperCamelCase : int =x[: len(x) - 1]
_UpperCamelCase : Any =x[len(x) - 1 :]
# for linear regression & sarimax
_UpperCamelCase : Any =total_date[: len(total_date) - 1]
_UpperCamelCase : List[Any] =total_user[: len(total_user) - 1]
_UpperCamelCase : List[str] =total_match[: len(total_match) - 1]
_UpperCamelCase : Union[str, Any] =total_date[len(total_date) - 1 :]
_UpperCamelCase : Tuple =total_user[len(total_user) - 1 :]
_UpperCamelCase : Any =total_match[len(total_match) - 1 :]
# voting system with forecasting
_UpperCamelCase : Any =[
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
_UpperCamelCase : Any ="" if data_safety_checker(res_vote, tst_user) else "not "
print('Today\'s data is {not_str}safe.')
| 206 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class __lowerCAmelCase :
UpperCamelCase__ = PegasusConfig
UpperCamelCase__ = {}
UpperCamelCase__ = '''gelu'''
def __init__( self :int , __magic_name__ :Optional[int] , __magic_name__ :str=13 , __magic_name__ :List[Any]=7 , __magic_name__ :Optional[int]=True , __magic_name__ :Optional[int]=False , __magic_name__ :List[Any]=99 , __magic_name__ :int=32 , __magic_name__ :Tuple=2 , __magic_name__ :List[str]=4 , __magic_name__ :Dict=37 , __magic_name__ :Tuple=0.1 , __magic_name__ :Optional[Any]=0.1 , __magic_name__ :Dict=40 , __magic_name__ :Tuple=2 , __magic_name__ :Optional[Any]=1 , __magic_name__ :Dict=0 , ):
'''simple docstring'''
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = eos_token_id
a = pad_token_id
a = bos_token_id
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
a = tf.concat([input_ids, eos_tensor] , axis=1 )
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
a = prepare_pegasus_inputs_dict(__magic_name__ , __magic_name__ , __magic_name__ )
return config, inputs_dict
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :Any , __magic_name__ :str ):
'''simple docstring'''
a = TFPegasusModel(config=__magic_name__ ).get_decoder()
a = inputs_dict["""input_ids"""]
a = input_ids[:1, :]
a = inputs_dict["""attention_mask"""][:1, :]
a = inputs_dict["""head_mask"""]
a = 1
# first forward pass
a = model(__magic_name__ , attention_mask=__magic_name__ , head_mask=__magic_name__ , use_cache=__magic_name__ )
a , a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
a = ids_tensor((self.batch_size, 3) , config.vocab_size )
a = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
a = tf.concat([input_ids, next_tokens] , axis=-1 )
a = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
a = model(__magic_name__ , attention_mask=__magic_name__ )[0]
a = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
a = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
a = output_from_no_past[:, -3:, random_slice_idx]
a = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__magic_name__ , __magic_name__ , rtol=1E-3 )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Tuple:
if attention_mask is None:
a = tf.cast(tf.math.not_equal(__lowerCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
a = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
a = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
a = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
a = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCamelCase__ = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase__ = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = TFPegasusModelTester(self )
a = ConfigTester(self , config_class=__magic_name__ )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__magic_name__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
UpperCamelCase__ = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCamelCase__ = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCamelCase__ = '''google/pegasus-xsum'''
@cached_property
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def lowerCamelCase__ ( self :int , **__magic_name__ :int ):
'''simple docstring'''
a = self.translate_src_text(**__magic_name__ )
assert self.expected_text == generated_words
def lowerCamelCase__ ( self :Union[str, Any] , **__magic_name__ :int ):
'''simple docstring'''
a = self.tokenizer(self.src_text , **__magic_name__ , padding=__magic_name__ , return_tensors="""tf""" )
a = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__magic_name__ , )
a = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__magic_name__ )
return generated_words
@slow
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
self._assert_generated_batch_equal_expected()
| 468 | 0 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _SCREAMING_SNAKE_CASE ( A : str ) -> List[str]:
"""simple docstring"""
__snake_case : Dict = []
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
for v in tree.values():
shapes.extend(_fetch_dims(SCREAMING_SNAKE_CASE_ ) )
elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(SCREAMING_SNAKE_CASE_ ) )
elif isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('Not supported' )
return shapes
@torch.jit.ignore
def _SCREAMING_SNAKE_CASE ( A : Optional[int] , A : List[Any] ) -> Any:
"""simple docstring"""
__snake_case : Optional[Any] = []
for d in reversed(SCREAMING_SNAKE_CASE_ ):
idx.append(flat_idx % d )
__snake_case : List[Any] = flat_idx // d
return tuple(reversed(SCREAMING_SNAKE_CASE_ ) )
@torch.jit.ignore
def _SCREAMING_SNAKE_CASE ( A : Dict , A : Union[str, Any] , A : Optional[Any] , A : int = None , A : Optional[int] = None , ) -> int:
"""simple docstring"""
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(A : Optional[int] ) -> None:
__snake_case : Any = True
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
__snake_case : Optional[int] = -1 * (i + 1)
l[reversed_idx] &= tally
__snake_case : List[Any] = l[reversed_idx]
if start_edges is None:
__snake_case : Optional[Any] = [s == 0 for s in start]
reduce_edge_list(SCREAMING_SNAKE_CASE_ )
if end_edges is None:
__snake_case : Optional[int] = [e == (d - 1) for e, d in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
reduce_edge_list(SCREAMING_SNAKE_CASE_ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return [()]
elif len(SCREAMING_SNAKE_CASE_ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
__snake_case : Optional[int] = []
__snake_case : List[str] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if s == e:
path_list.append(slice(SCREAMING_SNAKE_CASE_ , s + 1 ) )
else:
break
__snake_case : Optional[int] = tuple(SCREAMING_SNAKE_CASE_ )
__snake_case : Dict = len(SCREAMING_SNAKE_CASE_ )
# start == end, and we're done
if divergence_idx == len(SCREAMING_SNAKE_CASE_ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__snake_case : List[Any] = start[divergence_idx]
return tuple(
path + (slice(SCREAMING_SNAKE_CASE_ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__snake_case : Tuple = end[divergence_idx]
return tuple(
path + (slice(SCREAMING_SNAKE_CASE_ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
__snake_case : Optional[Any] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _SCREAMING_SNAKE_CASE ( A : Dict , A : Tuple , A : Tuple , A : int ) -> Any:
"""simple docstring"""
__snake_case : str = t.shape[:no_batch_dims]
__snake_case : str = list(_flat_idx_to_idx(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# _get_minimal_slice_set is inclusive
__snake_case : List[Any] = list(_flat_idx_to_idx(flat_end - 1 , SCREAMING_SNAKE_CASE_ ) )
# Get an ordered list of slices to perform
__snake_case : List[str] = _get_minimal_slice_set(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
__snake_case : Optional[int] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def _SCREAMING_SNAKE_CASE ( A : Tuple , A : Any , A : Union[str, Any] , A : List[str] , A : Dict = False , A : int = None , A : Optional[Any] = False , ) -> str:
"""simple docstring"""
if not (len(SCREAMING_SNAKE_CASE_ ) > 0):
raise ValueError('Must provide at least one input' )
__snake_case : List[Any] = [shape[:no_batch_dims] for shape in _fetch_dims(SCREAMING_SNAKE_CASE_ )]
__snake_case : Any = tuple([max(SCREAMING_SNAKE_CASE_ ) for s in zip(*SCREAMING_SNAKE_CASE_ )] )
def _prep_inputs(A : Union[str, Any] ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
__snake_case : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
__snake_case : Any = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
__snake_case : int = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
__snake_case : Union[str, Any] = tensor_tree_map(_prep_inputs , SCREAMING_SNAKE_CASE_ )
__snake_case : Tuple = None
if _out is not None:
__snake_case : Union[str, Any] = tensor_tree_map(lambda A : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
__snake_case : Dict = 1
for d in orig_batch_dims:
flat_batch_dim *= d
__snake_case : Union[str, Any] = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(A : List[str] ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
__snake_case : Any = 0
__snake_case : List[str] = prepped_outputs
for _ in range(SCREAMING_SNAKE_CASE_ ):
# Chunk the input
if not low_mem:
__snake_case : Union[str, Any] = _select_chunk
else:
__snake_case : Union[str, Any] = partial(
_chunk_slice , flat_start=SCREAMING_SNAKE_CASE_ , flat_end=min(SCREAMING_SNAKE_CASE_ , i + chunk_size ) , no_batch_dims=len(SCREAMING_SNAKE_CASE_ ) , )
__snake_case : List[Any] = tensor_tree_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Run the layer on the chunk
__snake_case : Tuple = layer(**SCREAMING_SNAKE_CASE_ )
# Allocate space for the output
if out is None:
__snake_case : Optional[int] = tensor_tree_map(lambda A : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , SCREAMING_SNAKE_CASE_ )
# Put the chunk in its pre-allocated space
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
def assign(A : int , A : int ) -> None:
for k, v in da.items():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
assign(SCREAMING_SNAKE_CASE_ , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
__snake_case : List[str] = da[k]
assign(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
for xa, xa in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
__snake_case : Union[str, Any] = xa
elif isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
__snake_case : Tuple = output_chunk
else:
raise ValueError('Not supported' )
i += chunk_size
__snake_case : str = tensor_tree_map(lambda A : t.view(orig_batch_dims + t.shape[1:] ) , SCREAMING_SNAKE_CASE_ )
return out
class a_ :
def __init__(self , __a = 5_1_2 , ) -> List[str]:
"""simple docstring"""
__snake_case : int = max_chunk_size
__snake_case : Union[str, Any] = None
__snake_case : Dict = None
def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Optional[int]:
"""simple docstring"""
logging.info('Tuning chunk size...')
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
__snake_case : Union[str, Any] = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)]
__snake_case : Optional[Any] = [c for c in candidates if c > min_chunk_size]
__snake_case : Optional[int] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(__a) -> bool:
try:
with torch.no_grad():
fn(*__lowercase , chunk_size=__lowercase)
return True
except RuntimeError:
return False
__snake_case : Tuple = 0
__snake_case : int = len(__lowercase) - 1
while i > min_viable_chunk_size_index:
__snake_case : Dict = test_chunk_size(candidates[i])
if not viable:
__snake_case : Optional[Any] = (min_viable_chunk_size_index + i) // 2
else:
__snake_case : int = i
__snake_case : Optional[Any] = (i + len(__lowercase) - 1) // 2
return candidates[min_viable_chunk_size_index]
def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> int:
"""simple docstring"""
__snake_case : List[Any] = True
for aa, aa in zip(__lowercase , __lowercase):
assert type(__lowercase) == type(__lowercase)
if isinstance(__lowercase , (list, tuple)):
consistent &= self._compare_arg_caches(__lowercase , __lowercase)
elif isinstance(__lowercase , __lowercase):
__snake_case : Any = [v for _, v in sorted(aa.items() , key=lambda __a: x[0])]
__snake_case : Optional[Any] = [v for _, v in sorted(aa.items() , key=lambda __a: x[0])]
consistent &= self._compare_arg_caches(__lowercase , __lowercase)
else:
consistent &= aa == aa
return consistent
def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , ) -> int:
"""simple docstring"""
__snake_case : List[str] = True
__snake_case : Dict = tree_map(lambda __a: a.shape if isinstance(__lowercase , torch.Tensor) else a , __lowercase , __lowercase)
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data) == len(__lowercase)
__snake_case : List[Any] = self._compare_arg_caches(self.cached_arg_data , __lowercase)
else:
# Otherwise, we can reuse the precomputed value
__snake_case : Optional[Any] = False
if not consistent:
__snake_case : Dict = self._determine_favorable_chunk_size(
__lowercase , __lowercase , __lowercase , )
__snake_case : int = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size | 715 |
'''simple docstring'''
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class a_ :
def __init__(self , __a , __a = 1_3 , __a = 6_4 , __a = 2 , __a = 3 , __a = 3 , __a = True , __a = True , __a = 1_2_8 , __a=[1_6, 3_2, 6_4, 1_2_8] , __a = 7 , __a = 4 , __a = 3_7 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 1_0 , __a = 0.02 , __a = 2 , __a = 1 , __a = 1_2_8 , __a = [2, 2, 2, 2] , __a = 2 , __a = 2 , ) -> str:
"""simple docstring"""
__snake_case : Optional[Any] = parent
__snake_case : Optional[int] = batch_size
__snake_case : Optional[Any] = image_size
__snake_case : Optional[int] = patch_size
__snake_case : Optional[Any] = num_channels
__snake_case : Optional[Any] = is_training
__snake_case : Tuple = use_labels
__snake_case : Optional[int] = hidden_size
__snake_case : Any = num_hidden_layers
__snake_case : List[str] = num_attention_heads
__snake_case : Tuple = intermediate_size
__snake_case : List[str] = hidden_act
__snake_case : Dict = hidden_dropout_prob
__snake_case : Any = attention_probs_dropout_prob
__snake_case : Dict = type_sequence_label_size
__snake_case : str = initializer_range
__snake_case : int = encoder_stride
__snake_case : List[str] = num_attention_outputs
__snake_case : Optional[Any] = embed_dim
__snake_case : Optional[Any] = embed_dim + 1
__snake_case : List[str] = resolution
__snake_case : Optional[int] = depths
__snake_case : List[Any] = hidden_sizes
__snake_case : List[str] = dim
__snake_case : Union[str, Any] = mlp_expansion_ratio
def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__snake_case : List[str] = None
if self.use_labels:
__snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case : Tuple = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = TFEfficientFormerModel(config=__a)
__snake_case : int = model(__a , training=__a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple:
"""simple docstring"""
__snake_case : Dict = self.type_sequence_label_size
__snake_case : List[Any] = TFEfficientFormerForImageClassification(__a)
__snake_case : Optional[int] = model(__a , labels=__a , training=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
__snake_case : List[Any] = 1
__snake_case : List[Any] = TFEfficientFormerForImageClassification(__a)
__snake_case : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__snake_case : str = model(__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE__ (self) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = self.prepare_config_and_inputs()
__snake_case ,__snake_case ,__snake_case : Union[str, Any] = config_and_inputs
__snake_case : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
_snake_case = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_snake_case = (
{
"""feature-extraction""": TFEfficientFormerModel,
"""image-classification""": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def SCREAMING_SNAKE_CASE__ (self) -> Dict:
"""simple docstring"""
__snake_case : Dict = TFEfficientFormerModelTester(self)
__snake_case : List[Any] = ConfigTester(
self , config_class=__a , has_text_modality=__a , hidden_size=3_7)
def SCREAMING_SNAKE_CASE__ (self) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds')
def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings')
def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ (self) -> Any:
"""simple docstring"""
__snake_case ,__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Optional[int] = model_class(__a)
__snake_case : Union[str, Any] = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Optional[int] = [*signature.parameters.keys()]
__snake_case : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a)
def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(__a , __a , __a):
__snake_case : str = model_class(__a)
__snake_case : List[Any] = model(**self._prepare_for_class(__a , __a) , training=__a)
__snake_case : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case : Optional[Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(__a) , __a)
if hasattr(self.model_tester , 'encoder_seq_length'):
__snake_case : List[Any] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length') and self.model_tester.chunk_length > 1:
__snake_case : str = seq_length * self.model_tester.chunk_length
else:
__snake_case : Optional[int] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__snake_case : List[Any] = outputs.decoder_hidden_states
self.asseretIsInstance(__a , (list, tuple))
self.assertEqual(len(__a) , __a)
__snake_case : List[str] = getattr(self.model_tester , 'seq_length' , __a)
__snake_case : Tuple = getattr(self.model_tester , 'decoder_seq_length' , __a)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
__snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = True
check_hidden_states_output(__a , __a , __a)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : str = True
check_hidden_states_output(__a , __a , __a)
def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a=False) -> int:
"""simple docstring"""
__snake_case : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a)
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet')
def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
"""simple docstring"""
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a)
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
@slow
def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = TFEfficientFormerModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
"""simple docstring"""
__snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Tuple = True
__snake_case : Optional[Any] = getattr(self.model_tester , 'seq_length' , __a)
__snake_case : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , __a)
__snake_case : Tuple = getattr(self.model_tester , 'key_length' , __a)
__snake_case : Optional[Any] = getattr(self.model_tester , 'chunk_length' , __a)
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes'):
__snake_case : str = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__snake_case : Optional[Any] = True
__snake_case : Dict = False
__snake_case : Optional[int] = True
__snake_case : Dict = model_class(__a)
__snake_case : Tuple = model(**self._prepare_for_class(__a , __a) , training=__a)
__snake_case : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a) , self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : Dict = True
__snake_case : str = model_class(__a)
__snake_case : str = model(**self._prepare_for_class(__a , __a) , training=__a)
__snake_case : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a) , self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
__snake_case ,__snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__snake_case : Tuple = model_class(__a)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__snake_case : Optional[Any] = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__a)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__snake_case : Tuple = model(__a)
self.assertTrue(outputs_dict is not None)
def _SCREAMING_SNAKE_CASE ( ) -> int:
"""simple docstring"""
__snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300')
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[str] = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300')
__snake_case : Optional[int] = self.default_image_processor
__snake_case : List[Any] = prepare_img()
__snake_case : List[Any] = image_processor(images=__a , return_tensors='tf')
# forward pass
__snake_case : List[str] = model(**__a , training=__a)
# verify the logits
__snake_case : str = tf.TensorShape((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , __a)
__snake_case : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852])
self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))
@slow
def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300')
__snake_case : List[Any] = self.default_image_processor
__snake_case : Union[str, Any] = prepare_img()
__snake_case : List[Any] = image_processor(images=__a , return_tensors='tf')
# forward pass
__snake_case : Optional[int] = model(**__a , training=__a)
# verify the logits
__snake_case : Optional[int] = tf.TensorShape((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , __a)
__snake_case : List[str] = tf.constant([-0.1_312, 0.4_353, -1.0_499])
self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4)) | 61 | 0 |
"""simple docstring"""
def _snake_case ( _snake_case : list[list[int]] , _snake_case : int , _snake_case : int , _snake_case : set ) -> int:
'''simple docstring'''
_A , _A = len(_snake_case ), len(grid[0] )
if (
min(_snake_case , _snake_case ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
_A = 0
count += depth_first_search(_snake_case , row + 1 , _snake_case , _snake_case )
count += depth_first_search(_snake_case , row - 1 , _snake_case , _snake_case )
count += depth_first_search(_snake_case , _snake_case , col + 1 , _snake_case )
count += depth_first_search(_snake_case , _snake_case , col - 1 , _snake_case )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 |
"""simple docstring"""
def _snake_case ( _snake_case : str ) -> str:
'''simple docstring'''
return " ".join(
''.join(word[::-1] ) if len(_snake_case ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 7 | 1 |
from __future__ import annotations
from typing import Any
class lowercase ( A__ ):
'''simple docstring'''
pass
class lowercase :
'''simple docstring'''
def __init__( self , _snake_case ) -> None:
"""simple docstring"""
UpperCAmelCase = data
UpperCAmelCase = None
def __iter__( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self
UpperCAmelCase = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(_snake_case )
yield node.data
UpperCAmelCase = node.next_node
@property
def snake_case_ ( self ) -> bool:
"""simple docstring"""
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
__magic_name__ = Node(1)
__magic_name__ = Node(2)
__magic_name__ = Node(3)
__magic_name__ = Node(4)
print(root_node.has_loop) # False
__magic_name__ = root_node.next_node
print(root_node.has_loop) # True
__magic_name__ = Node(5)
__magic_name__ = Node(6)
__magic_name__ = Node(5)
__magic_name__ = Node(6)
print(root_node.has_loop) # False
__magic_name__ = Node(1)
print(root_node.has_loop) # False
| 391 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
class lowercase :
'''simple docstring'''
def __init__( self , _snake_case ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = [[] for _ in range(_snake_case )]
UpperCAmelCase = size
def __getitem__( self , _snake_case ) -> Iterator[Edge]:
"""simple docstring"""
return iter(self._graph[vertex] )
@property
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
return self._size
def snake_case_ ( self , _snake_case , _snake_case , _snake_case ) -> Dict:
"""simple docstring"""
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(_snake_case , _snake_case ) )
def snake_case_ ( self , _snake_case , _snake_case ) -> int | None:
"""simple docstring"""
UpperCAmelCase = deque([start_vertex] )
UpperCAmelCase = [None] * self.size
UpperCAmelCase = 0
while queue:
UpperCAmelCase = queue.popleft()
UpperCAmelCase = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
UpperCAmelCase = current_distance + edge.weight
UpperCAmelCase = distances[edge.destination_vertex]
if (
isinstance(_snake_case , _snake_case )
and new_distance >= dest_vertex_distance
):
continue
UpperCAmelCase = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 391 | 1 |
"""simple docstring"""
def _snake_case ( snake_case__ : int ):
assert (
isinstance(snake_case__ , snake_case__ ) and number_of_steps > 0
), F'number_of_steps needs to be positive integer, your input {number_of_steps}'
if number_of_steps == 1:
return 1
A , A = 1, 1
for _ in range(number_of_steps - 1 ):
A , A = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod() | 91 |
"""simple docstring"""
from torch import nn
def _snake_case ( snake_case__ : Union[str, Any] ):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'Unsupported activation function: {act_fn}' ) | 91 | 1 |
from manim import *
class a ( __lowercase ):
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: str = Rectangle(height=0.5 , width=0.5 )
__SCREAMING_SNAKE_CASE: Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__SCREAMING_SNAKE_CASE: Tuple = [mem.copy() for i in range(6 )]
__SCREAMING_SNAKE_CASE: Optional[int] = [mem.copy() for i in range(6 )]
__SCREAMING_SNAKE_CASE: Optional[int] = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__SCREAMING_SNAKE_CASE: Union[str, Any] = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__SCREAMING_SNAKE_CASE: Optional[int] = VGroup(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__SCREAMING_SNAKE_CASE: List[str] = Text('''CPU''' , font_size=24 )
__SCREAMING_SNAKE_CASE: List[str] = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[Any] = [mem.copy() for i in range(4 )]
__SCREAMING_SNAKE_CASE: Tuple = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__SCREAMING_SNAKE_CASE: Optional[Any] = Text('''GPU''' , font_size=24 )
__SCREAMING_SNAKE_CASE: Union[str, Any] = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[int] = [mem.copy() for i in range(6 )]
__SCREAMING_SNAKE_CASE: str = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__SCREAMING_SNAKE_CASE: Any = Text('''Model''' , font_size=24 )
__SCREAMING_SNAKE_CASE: List[str] = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Union[str, Any] = []
for i, rect in enumerate(_lowerCAmelCase ):
rect.set_stroke(_lowerCAmelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
__SCREAMING_SNAKE_CASE: int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowerCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=_lowerCAmelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=_lowerCAmelCase , buff=0.0 )
self.add(_lowerCAmelCase )
cpu_targs.append(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Dict = [mem.copy() for i in range(6 )]
__SCREAMING_SNAKE_CASE: Union[str, Any] = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__SCREAMING_SNAKE_CASE: str = Text('''Loaded Checkpoint''' , font_size=24 )
__SCREAMING_SNAKE_CASE: Dict = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , aligned_edge=_lowerCAmelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
__SCREAMING_SNAKE_CASE: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__SCREAMING_SNAKE_CASE: Optional[int] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[int] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(_lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
__SCREAMING_SNAKE_CASE: Optional[int] = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase ) , Write(_lowerCAmelCase ) )
self.play(Write(_lowerCAmelCase , run_time=1 ) , Create(_lowerCAmelCase , run_time=1 ) )
__SCREAMING_SNAKE_CASE: List[str] = []
__SCREAMING_SNAKE_CASE: Tuple = []
for i, rect in enumerate(_lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: int = fill.copy().set_fill(_lowerCAmelCase , opacity=0.7 )
target.move_to(_lowerCAmelCase )
first_animations.append(GrowFromCenter(_lowerCAmelCase , run_time=1 ) )
__SCREAMING_SNAKE_CASE: List[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(_lowerCAmelCase , run_time=1.5 ) )
self.play(*_lowerCAmelCase )
self.play(*_lowerCAmelCase )
self.wait()
| 146 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class a ( __lowercase ):
SCREAMING_SNAKE_CASE__ : List[Any] = (EulerDiscreteScheduler,)
SCREAMING_SNAKE_CASE__ : Any = 10
def snake_case_ ( self , **_lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: str = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_lowerCAmelCase )
return config
def snake_case_ ( self ):
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: str = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE: Optional[int] = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE: List[str] = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
__SCREAMING_SNAKE_CASE: Dict = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE: Optional[Any] = self.dummy_model()
__SCREAMING_SNAKE_CASE: Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
__SCREAMING_SNAKE_CASE: Optional[int] = sample.to(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE: Any = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Union[str, Any] = model(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[Any] = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: int = output.prev_sample
__SCREAMING_SNAKE_CASE: Tuple = torch.sum(torch.abs(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: List[Any] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Any = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE: Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
__SCREAMING_SNAKE_CASE: str = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
__SCREAMING_SNAKE_CASE: str = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE: Optional[int] = self.dummy_model()
__SCREAMING_SNAKE_CASE: Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
__SCREAMING_SNAKE_CASE: Any = sample.to(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE: Tuple = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Tuple = model(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Union[str, Any] = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[int] = output.prev_sample
__SCREAMING_SNAKE_CASE: List[Any] = torch.sum(torch.abs(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: Any = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Any = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE: Tuple = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE: List[str] = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[str] = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE: Optional[Any] = self.dummy_model()
__SCREAMING_SNAKE_CASE: int = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__SCREAMING_SNAKE_CASE: Any = sample.to(_lowerCAmelCase )
for t in scheduler.timesteps:
__SCREAMING_SNAKE_CASE: Optional[Any] = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Tuple = model(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[int] = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: int = output.prev_sample
__SCREAMING_SNAKE_CASE: List[str] = torch.sum(torch.abs(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: Dict = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: int = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE: str = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE: Union[str, Any] = scheduler_class(**_lowerCAmelCase , use_karras_sigmas=_lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: int = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE: Any = self.dummy_model()
__SCREAMING_SNAKE_CASE: str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__SCREAMING_SNAKE_CASE: Optional[int] = sample.to(_lowerCAmelCase )
for t in scheduler.timesteps:
__SCREAMING_SNAKE_CASE: Optional[Any] = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[Any] = model(_lowerCAmelCase , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Any = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: List[str] = output.prev_sample
__SCREAMING_SNAKE_CASE: int = torch.sum(torch.abs(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: Optional[int] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
| 146 | 1 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _UpperCamelCase ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE:Union[str, Any] = ['image_processor']
SCREAMING_SNAKE_CASE:Union[str, Any] = 'SamImageProcessor'
def __init__( self , _a ):
"""simple docstring"""
super().__init__(_a )
a__ = self.image_processor
a__ = -10
a__ = self.image_processor.size['longest_edge']
def __call__( self , _a=None , _a=None , _a=None , _a=None , _a = None , **_a , ):
"""simple docstring"""
a__ = self.image_processor(
_a , return_tensors=_a , **_a , )
# pop arguments that are not used in the foward but used nevertheless
a__ = encoding_image_processor['original_sizes']
if hasattr(_a , 'numpy' ): # Checks if Torch or TF tensor
a__ = original_sizes.numpy()
a__ , a__ , a__ = self._check_and_preprocess_points(
input_points=_a , input_labels=_a , input_boxes=_a , )
a__ = self._normalize_and_convert(
_a , _a , input_points=_a , input_labels=_a , input_boxes=_a , return_tensors=_a , )
return encoding_image_processor
def lowercase__ ( self , _a , _a , _a=None , _a=None , _a=None , _a="pt" , ):
"""simple docstring"""
if input_points is not None:
if len(_a ) != len(_a ):
a__ = [
self._normalize_coordinates(self.target_size , _a , original_sizes[0] ) for point in input_points
]
else:
a__ = [
self._normalize_coordinates(self.target_size , _a , _a )
for point, original_size in zip(_a , _a )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
a__ , a__ = self._pad_points_and_labels(_a , _a )
a__ = np.array(_a )
if input_labels is not None:
a__ = np.array(_a )
if input_boxes is not None:
if len(_a ) != len(_a ):
a__ = [
self._normalize_coordinates(self.target_size , _a , original_sizes[0] , is_bounding_box=_a )
for box in input_boxes
]
else:
a__ = [
self._normalize_coordinates(self.target_size , _a , _a , is_bounding_box=_a )
for box, original_size in zip(_a , _a )
]
a__ = np.array(_a )
if input_boxes is not None:
if return_tensors == "pt":
a__ = torch.from_numpy(_a )
# boxes batch size of 1 by default
a__ = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
a__ = tf.convert_to_tensor(_a )
# boxes batch size of 1 by default
a__ = tf.expand_dims(_a , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
a__ = torch.from_numpy(_a )
# point batch size of 1 by default
a__ = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
a__ = tf.convert_to_tensor(_a )
# point batch size of 1 by default
a__ = tf.expand_dims(_a , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
a__ = torch.from_numpy(_a )
# point batch size of 1 by default
a__ = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
a__ = tf.convert_to_tensor(_a )
# point batch size of 1 by default
a__ = tf.expand_dims(_a , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
def lowercase__ ( self , _a , _a ):
"""simple docstring"""
a__ = max([point.shape[0] for point in input_points] )
a__ = []
for i, point in enumerate(_a ):
if point.shape[0] != expected_nb_points:
a__ = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
a__ = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(_a )
a__ = processed_input_points
return input_points, input_labels
def lowercase__ ( self , _a , _a , _a , _a=False ):
"""simple docstring"""
a__ , a__ = original_size
a__ , a__ = self.image_processor._get_preprocess_shape(_a , longest_edge=_a )
a__ = deepcopy(_a ).astype(_a )
if is_bounding_box:
a__ = coords.reshape(-1 , 2 , 2 )
a__ = coords[..., 0] * (new_w / old_w)
a__ = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
a__ = coords.reshape(-1 , 4 )
return coords
def lowercase__ ( self , _a=None , _a=None , _a=None , ):
"""simple docstring"""
if input_points is not None:
if hasattr(_a , 'numpy' ): # Checks for TF or Torch tensor
a__ = input_points.numpy().tolist()
if not isinstance(_a , _a ) or not isinstance(input_points[0] , _a ):
raise ValueError('Input points must be a list of list of floating points.' )
a__ = [np.array(_a ) for input_point in input_points]
else:
a__ = None
if input_labels is not None:
if hasattr(_a , 'numpy' ):
a__ = input_labels.numpy().tolist()
if not isinstance(_a , _a ) or not isinstance(input_labels[0] , _a ):
raise ValueError('Input labels must be a list of list integers.' )
a__ = [np.array(_a ) for label in input_labels]
else:
a__ = None
if input_boxes is not None:
if hasattr(_a , 'numpy' ):
a__ = input_boxes.numpy().tolist()
if (
not isinstance(_a , _a )
or not isinstance(input_boxes[0] , _a )
or not isinstance(input_boxes[0][0] , _a )
):
raise ValueError('Input boxes must be a list of list of list of floating points.' )
a__ = [np.array(_a ).astype(np.floataa ) for box in input_boxes]
else:
a__ = None
return input_points, input_labels, input_boxes
@property
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.image_processor.model_input_names
return list(dict.fromkeys(_a ) )
def lowercase__ ( self , *_a , **_a ):
"""simple docstring"""
return self.image_processor.post_process_masks(*_a , **_a )
| 394 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__A : Optional[Any] = logging.getLogger(__name__)
def lowerCAmelCase_ ( ):
a__ = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=a , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=a , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=a , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=a , default=1000 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=a , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=a , type=a , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=a , default=512 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=a , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
a__ = parser.parse_args()
return args
def lowerCAmelCase_ ( a : int ):
def fn(a : int ):
return tokenizer(examples['text'] )
return fn
def lowerCAmelCase_ ( a : str ):
a__ = []
for i in range(len(tokenized_data['input_ids'] ) ):
a__ = {
'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ),
'attention_mask': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ),
}
a__ = tf.train.Features(feature=a )
a__ = tf.train.Example(features=a )
a__ = example.SerializeToString()
records.append(a )
return records
def lowerCAmelCase_ ( a : Any ):
a__ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
a__ = min(len(a ) , args.limit )
a__ = dataset.select(range(a ) )
print(f'''Limiting the dataset to {args.limit} entries.''' )
a__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
a__ = os.path.join(args.output_dir , args.split )
if not os.path.exists(a ):
os.makedirs(a )
else:
a__ = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
a__ = tokenize_function(a )
a__ = dataset.map(a , batched=a , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(a : Optional[Any] ):
# Concatenate all texts.
a__ = {k: sum(examples[k] , [] ) for k in examples.keys()}
a__ = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
a__ = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
a__ = {
k: [t[i : i + args.max_length] for i in range(0 , a , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
a__ = dataset_tokenized.map(a , batched=a , batch_size=1000 , num_proc=4 )
a__ = 0
a__ = 0
for shard in range(0 , len(a ) , args.shard_size ):
a__ = grouped_dataset[shard : shard + args.shard_size]
a__ = len(dataset_snapshot['input_ids'] )
a__ = os.path.join(a , f'''dataset-{shard_count}-{records_containing}.tfrecord''' )
a__ = get_serialized_examples(a )
with tf.io.TFRecordWriter(a ) as out_file:
for i in range(len(a ) ):
a__ = serialized_examples[i]
out_file.write(a )
print('Wrote file {} containing {} records'.format(a , a ) )
shard_count += 1
total_records += records_containing
with open(f'''split-{args.split}-records-count.txt''' , 'w' ) as f:
print(f'''Total {args.split} records: {total_records}''' , file=a )
if __name__ == "__main__":
__A : str = parse_args()
main(args)
| 394 | 1 |
import os
from datetime import datetime as dt
from github import Github
__magic_name__ = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def __snake_case ( ):
"""simple docstring"""
lowercase = Github(os.environ['GITHUB_TOKEN'] )
lowercase = g.get_repo('huggingface/diffusers' )
lowercase = repo.get_issues(state='open' )
for issue in open_issues:
lowercase = sorted(issue.get_comments() , key=lambda _UpperCAmelCase : i.created_at , reverse=_UpperCAmelCase )
lowercase = comments[0] if len(_UpperCAmelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 314 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def __snake_case ( _UpperCAmelCase ):
"""simple docstring"""
lowercase = int(number**0.5 )
return number == sq * sq
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
lowercase = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
lowercase = x_den * y_den * z_den
lowercase = gcd(_UpperCAmelCase , _UpperCAmelCase )
top //= hcf
bottom //= hcf
return top, bottom
def __snake_case ( _UpperCAmelCase = 35 ):
"""simple docstring"""
lowercase = set()
lowercase = 42
lowercase = Fraction(0 )
lowercase = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
lowercase = x_num * y_den + x_den * y_num
lowercase = x_den * y_den
lowercase = gcd(_UpperCAmelCase , _UpperCAmelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase = add_three(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
unique_s.add(_UpperCAmelCase )
# n=2
lowercase = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
lowercase = x_den * x_den * y_den * y_den
if is_sq(_UpperCAmelCase ) and is_sq(_UpperCAmelCase ):
lowercase = int(sqrt(_UpperCAmelCase ) )
lowercase = int(sqrt(_UpperCAmelCase ) )
lowercase = gcd(_UpperCAmelCase , _UpperCAmelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase = add_three(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
unique_s.add(_UpperCAmelCase )
# n=-1
lowercase = x_num * y_num
lowercase = x_den * y_num + x_num * y_den
lowercase = gcd(_UpperCAmelCase , _UpperCAmelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase = add_three(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
unique_s.add(_UpperCAmelCase )
# n=2
lowercase = x_num * x_num * y_num * y_num
lowercase = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(_UpperCAmelCase ) and is_sq(_UpperCAmelCase ):
lowercase = int(sqrt(_UpperCAmelCase ) )
lowercase = int(sqrt(_UpperCAmelCase ) )
lowercase = gcd(_UpperCAmelCase , _UpperCAmelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase = add_three(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
unique_s.add(_UpperCAmelCase )
for num, den in unique_s:
total += Fraction(_UpperCAmelCase , _UpperCAmelCase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
| 314 | 1 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __snake_case (__UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : int = tmp_path / '''cache'''
lowerCamelCase_ : str = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase_ : Tuple = TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase ).read()
_check_text_dataset(__UpperCAmelCase , __UpperCAmelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : Tuple = tmp_path / '''cache'''
lowerCamelCase_ : int = {'''text''': '''string'''}
lowerCamelCase_ : Optional[Any] = features.copy() if features else default_expected_features
lowerCamelCase_ : Dict = (
Features({feature: Value(__UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase_ : Optional[int] = TextDatasetReader(__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read()
_check_text_dataset(__UpperCAmelCase , __UpperCAmelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = tmp_path / '''cache'''
lowerCamelCase_ : Any = {'''text''': '''string'''}
lowerCamelCase_ : Optional[Any] = TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase , split=__UpperCAmelCase ).read()
_check_text_dataset(__UpperCAmelCase , __UpperCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
if issubclass(__UpperCAmelCase , __UpperCAmelCase ):
lowerCamelCase_ : List[Any] = text_path
elif issubclass(__UpperCAmelCase , __UpperCAmelCase ):
lowerCamelCase_ : Optional[int] = [text_path]
lowerCamelCase_ : Any = tmp_path / '''cache'''
lowerCamelCase_ : Dict = {'''text''': '''string'''}
lowerCamelCase_ : Dict = TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read()
_check_text_dataset(__UpperCAmelCase , __UpperCAmelCase )
def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=("train",) ):
"""simple docstring"""
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
for split in splits:
lowerCamelCase_ : List[str] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : Tuple = tmp_path / '''cache'''
lowerCamelCase_ : Optional[Any] = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase_ : Union[str, Any] = TextDatasetReader({'''train''': text_path} , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase ).read()
_check_text_datasetdict(__UpperCAmelCase , __UpperCAmelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : List[str] = tmp_path / '''cache'''
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
lowerCamelCase_ : Optional[Any] = {'''text''': '''string'''}
lowerCamelCase_ : List[Any] = features.copy() if features else default_expected_features
lowerCamelCase_ : List[str] = (
Features({feature: Value(__UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase_ : str = TextDatasetReader({'''train''': text_path} , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read()
_check_text_datasetdict(__UpperCAmelCase , __UpperCAmelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
if split:
lowerCamelCase_ : Optional[Any] = {split: text_path}
else:
lowerCamelCase_ : List[str] = '''train'''
lowerCamelCase_ : int = {'''train''': text_path, '''test''': text_path}
lowerCamelCase_ : Union[str, Any] = tmp_path / '''cache'''
lowerCamelCase_ : List[str] = {'''text''': '''string'''}
lowerCamelCase_ : str = TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read()
_check_text_datasetdict(__UpperCAmelCase , __UpperCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 501 |
'''simple docstring'''
from __future__ import annotations
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = [True] * limit
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : Tuple = False
lowerCamelCase_ : Union[str, Any] = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
lowerCamelCase_ : List[str] = i * 2
while index < limit:
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : Union[str, Any] = index + i
lowerCamelCase_ : str = [2]
for i in range(3 , __UpperCAmelCase , 2 ):
if is_prime[i]:
primes.append(__UpperCAmelCase )
return primes
def __snake_case (__UpperCAmelCase = 1000000 ):
"""simple docstring"""
lowerCamelCase_ : int = prime_sieve(__UpperCAmelCase )
lowerCamelCase_ : Optional[int] = 0
lowerCamelCase_ : Tuple = 0
for i in range(len(__UpperCAmelCase ) ):
for j in range(i + length , len(__UpperCAmelCase ) ):
lowerCamelCase_ : List[str] = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
lowerCamelCase_ : Tuple = j - i
lowerCamelCase_ : List[str] = sol
return largest
if __name__ == "__main__":
print(f"""{solution() = }""")
| 501 | 1 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_UpperCamelCase = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
_UpperCamelCase = 10
_UpperCamelCase = 256
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE ) < MIN_NUM_TOKENS:
return None
__lowerCamelCase : List[str] =MinHash(num_perm=SCREAMING_SNAKE_CASE )
for token in set(SCREAMING_SNAKE_CASE ):
min_hash.update(token.encode() )
return min_hash
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
return {t for t in NON_ALPHA.split(SCREAMING_SNAKE_CASE ) if len(t.strip() ) > 0}
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self :Tuple , *,
__lowercase :float = 0.85 , ):
__lowerCamelCase : Union[str, Any] =duplication_jaccard_threshold
__lowerCamelCase : Dict =NUM_PERM
__lowerCamelCase : Dict =MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
__lowerCamelCase : Optional[Any] =defaultdict(__lowercase )
def __lowercase ( self :Any , __lowercase :Tuple , __lowercase :MinHash ):
__lowerCamelCase : Optional[int] =self._index.query(__lowercase )
if code_key in self._index.keys:
print(f'Duplicate key {code_key}' )
return
self._index.insert(__lowercase , __lowercase )
if len(__lowercase ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(__lowercase )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(__lowercase )
def __lowercase ( self :Tuple ):
__lowerCamelCase : List[Any] =[]
for base, duplicates in self._duplicate_clusters.items():
__lowerCamelCase : Union[str, Any] =[base] + list(__lowercase )
# reformat the cluster to be a list of dict
__lowerCamelCase : Optional[int] =[{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(__lowercase )
return duplicate_clusters
def __lowercase ( self :Optional[Any] , __lowercase :Union[str, Any] ):
__lowerCamelCase : Any =self.get_duplicate_clusters()
with open(__lowercase , '''w''' ) as f:
json.dump(__lowercase , __lowercase )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase : Union[str, Any] =element
__lowerCamelCase : List[Any] =get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Type[Dataset] ):
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(SCREAMING_SNAKE_CASE , max_queue_size=10000 ) , chunksize=100 , ):
if data is not None:
yield data
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Type[Dataset] , SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
__lowerCamelCase : Any =DuplicationIndex(duplication_jaccard_threshold=SCREAMING_SNAKE_CASE )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(SCREAMING_SNAKE_CASE ) ) , max_queue_size=100 ) ):
di.add(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
__lowerCamelCase : Optional[int] =get_tokens(SCREAMING_SNAKE_CASE )
__lowerCamelCase : Union[str, Any] =get_tokens(SCREAMING_SNAKE_CASE )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_UpperCamelCase = None
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase : Optional[int] =[]
for elementa in cluster:
__lowerCamelCase : Optional[Any] =_shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
__lowerCamelCase : List[Any] =_shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
__lowerCamelCase : Union[str, Any] =1
extremes.append(SCREAMING_SNAKE_CASE )
return extremes
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
global _shared_dataset
__lowerCamelCase : str =dataset
__lowerCamelCase : str =[]
__lowerCamelCase : Dict =partial(_find_cluster_extremes_shared , jaccard_threshold=SCREAMING_SNAKE_CASE )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) , total=len(SCREAMING_SNAKE_CASE ) , ):
extremes_list.append(SCREAMING_SNAKE_CASE )
return extremes_list
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Type[Dataset] , SCREAMING_SNAKE_CASE : float = 0.85 ):
'''simple docstring'''
__lowerCamelCase : int =make_duplicate_clusters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCamelCase : int ={x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
__lowerCamelCase : List[str] ={}
__lowerCamelCase : Union[str, Any] =find_extremes(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for extremes in extremes_clusters:
for element in extremes:
__lowerCamelCase : Dict =element
__lowerCamelCase : str =duplicate_indices - set(extreme_dict.keys() )
__lowerCamelCase : Optional[int] =dataset.filter(lambda SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : idx not in remove_indices , with_indices=SCREAMING_SNAKE_CASE )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
__lowerCamelCase : Dict =element['''base_index'''] in extreme_dict
if element["is_extreme"]:
__lowerCamelCase : int =extreme_dict[element['''base_index''']]['''copies''']
print(F'Original dataset size: {len(SCREAMING_SNAKE_CASE )}' )
print(F'Number of duplicate clusters: {len(SCREAMING_SNAKE_CASE )}' )
print(F'Files in duplicate cluster: {len(SCREAMING_SNAKE_CASE )}' )
print(F'Unique files in duplicate cluster: {len(SCREAMING_SNAKE_CASE )}' )
print(F'Filtered dataset size: {len(SCREAMING_SNAKE_CASE )}' )
return ds_filter, duplicate_clusters
| 363 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
def __init__( self :List[str] , *__lowercase :int , **__lowercase :Any ):
warnings.warn(
'''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use FlavaImageProcessor instead.''' , __lowercase , )
super().__init__(*__lowercase , **__lowercase )
| 363 | 1 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {"""vocab_file""": """vocab.txt"""}
__UpperCamelCase : Any = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
__UpperCamelCase : Optional[int] = {
"""openbmb/cpm-ant-10b""": 1024,
}
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: Tuple ) -> Optional[int]:
"""simple docstring"""
__a = collections.OrderedDict()
with open(SCREAMING_SNAKE_CASE__, 'r', encoding='utf-8' ) as reader:
__a = reader.readlines()
for index, token in enumerate(SCREAMING_SNAKE_CASE__ ):
__a = token.rstrip('\n' )
__a = index
return vocab
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
def __init__( self , lowerCamelCase , lowerCamelCase="<unk>" , lowerCamelCase=200 ) ->str:
'''simple docstring'''
__a = vocab
__a = unk_token
__a = max_input_chars_per_word
def __UpperCamelCase ( self , lowerCamelCase ) ->Dict:
'''simple docstring'''
__a = list(lowerCamelCase )
if len(lowerCamelCase ) > self.max_input_chars_per_word:
return [self.unk_token]
__a = 0
__a = []
while start < len(lowerCamelCase ):
__a = len(lowerCamelCase )
__a = None
while start < end:
__a = ''.join(chars[start:end] )
if substr in self.vocab:
__a = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(lowerCamelCase )
__a = end
return sub_tokens
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
__a =VOCAB_FILES_NAMES
__a =PRETRAINED_VOCAB_FILES_MAP
__a =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a =["input_ids", "attention_mask"]
__a =False
def __init__( self , lowerCamelCase , lowerCamelCase="<d>" , lowerCamelCase="</d>" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<pad>" , lowerCamelCase="<unk>" , lowerCamelCase="</n>" , lowerCamelCase="</_>" , lowerCamelCase="left" , **lowerCamelCase , ) ->Tuple:
'''simple docstring'''
requires_backends(self , ['jieba'] )
super().__init__(
bod_token=lowerCamelCase , eod_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , unk_token=lowerCamelCase , line_token=lowerCamelCase , space_token=lowerCamelCase , padding_side=lowerCamelCase , **lowerCamelCase , )
__a = bod_token
__a = eod_token
__a = load_vocab(lowerCamelCase )
__a = self.encoder[space_token]
__a = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
__a = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase : x[1] ) )
__a = {v: k for k, v in self.encoder.items()}
__a = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __UpperCamelCase ( self ) ->List[str]:
'''simple docstring'''
return self.encoder[self.bod_token]
@property
def __UpperCamelCase ( self ) ->Any:
'''simple docstring'''
return self.encoder[self.eod_token]
@property
def __UpperCamelCase ( self ) ->Tuple:
'''simple docstring'''
return self.encoder["\n"]
@property
def __UpperCamelCase ( self ) ->int:
'''simple docstring'''
return len(self.encoder )
def __UpperCamelCase ( self ) ->Union[str, Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase ( self , lowerCamelCase ) ->int:
'''simple docstring'''
__a = []
for x in jieba.cut(lowerCamelCase , cut_all=lowerCamelCase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase ) )
return output_tokens
def __UpperCamelCase ( self , lowerCamelCase , **lowerCamelCase ) ->List[str]:
'''simple docstring'''
__a = [i for i in token_ids if i >= 0]
__a = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase , **lowerCamelCase )
def __UpperCamelCase ( self , lowerCamelCase ) ->Any:
'''simple docstring'''
return token in self.encoder
def __UpperCamelCase ( self , lowerCamelCase ) ->str:
'''simple docstring'''
return "".join(lowerCamelCase )
def __UpperCamelCase ( self , lowerCamelCase ) ->Optional[Any]:
'''simple docstring'''
return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self , lowerCamelCase ) ->List[Any]:
'''simple docstring'''
return self.decoder.get(lowerCamelCase , self.unk_token )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase = None ) ->Tuple[str]:
'''simple docstring'''
if os.path.isdir(lowerCamelCase ):
__a = os.path.join(
lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
__a = (filename_prefix + '-' if filename_prefix else '') + save_directory
__a = 0
if " " in self.encoder:
__a = self.encoder[' ']
del self.encoder[" "]
if "\n" in self.encoder:
__a = self.encoder['\n']
del self.encoder["\n"]
__a = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase : x[1] ) )
with open(lowerCamelCase , 'w' , encoding='utf-8' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
' Please check that the vocabulary is not corrupted!' )
__a = token_index
writer.write(token + '\n' )
index += 1
return (vocab_file,)
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase = None ) ->List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase )) + [1] + ([0] * len(lowerCamelCase ))
return [1] + ([0] * len(lowerCamelCase )) | 448 |
'''simple docstring'''
import os
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: str = "matrix.txt" ) -> int:
"""simple docstring"""
with open(os.path.join(os.path.dirname(SCREAMING_SNAKE_CASE__ ), SCREAMING_SNAKE_CASE__ ) ) as in_file:
__a = in_file.read()
__a = [[int(SCREAMING_SNAKE_CASE__ ) for cell in row.split(',' )] for row in data.strip().splitlines()]
__a = [[0 for cell in row] for row in grid]
__a = len(grid[0] )
__a = [[0 for i in range(SCREAMING_SNAKE_CASE__ )] for j in range(SCREAMING_SNAKE_CASE__ )]
__a = grid[0][0]
for i in range(1, SCREAMING_SNAKE_CASE__ ):
__a = grid[0][i] + dp[0][i - 1]
for i in range(1, SCREAMING_SNAKE_CASE__ ):
__a = grid[i][0] + dp[i - 1][0]
for i in range(1, SCREAMING_SNAKE_CASE__ ):
for j in range(1, SCREAMING_SNAKE_CASE__ ):
__a = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(f"""{solution() = }""") | 448 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class snake_case_ ( unittest.TestCase):
def __lowercase ( self ) -> Optional[Any]:
lowerCamelCase : Dict =inspect.getfile(accelerate.test_utils )
lowerCamelCase : List[Any] =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
lowerCamelCase : Any =os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
lowerCamelCase : str =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
@require_multi_gpu
def __lowercase ( self ) -> Any:
print(F"Found {torch.cuda.device_count()} devices." )
lowerCamelCase : int =['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowercase , env=os.environ.copy() )
@require_multi_gpu
def __lowercase ( self ) -> Union[str, Any]:
print(F"Found {torch.cuda.device_count()} devices." )
lowerCamelCase : Dict =['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(F"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowercase , env=os.environ.copy() )
@require_multi_gpu
def __lowercase ( self ) -> List[str]:
lowerCamelCase : List[Any] =['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowercase , env=os.environ.copy() )
@require_multi_gpu
def __lowercase ( self ) -> Optional[int]:
print(F"Found {torch.cuda.device_count()} devices, using 2 devices only" )
lowerCamelCase : Any =['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
execute_subprocess_async(__lowercase , env=os.environ.copy() )
if __name__ == "__main__":
snake_case_ = Accelerator()
snake_case_ = (accelerator.state.process_index + 2, 1_0)
snake_case_ = torch.randint(0, 1_0, shape).to(accelerator.device)
snake_case_ = ''''''
snake_case_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
snake_case_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
snake_case_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 711 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = math.inf , SCREAMING_SNAKE_CASE_ = -math.inf , SCREAMING_SNAKE_CASE_ = math.inf , SCREAMING_SNAKE_CASE_ = -math.inf , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1_0_0 , SCREAMING_SNAKE_CASE_ = 0.0_1 , SCREAMING_SNAKE_CASE_ = 1 , ) -> Any:
lowerCamelCase : Union[str, Any] =False
lowerCamelCase : int =search_prob
lowerCamelCase : int =start_temperate
lowerCamelCase : Any =[]
lowerCamelCase : Any =0
lowerCamelCase : Union[str, Any] =None
while not search_end:
lowerCamelCase : Dict =current_state.score()
if best_state is None or current_score > best_state.score():
lowerCamelCase : Tuple =current_state
scores.append(SCREAMING_SNAKE_CASE_ )
iterations += 1
lowerCamelCase : Union[str, Any] =None
lowerCamelCase : int =current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
lowerCamelCase : Union[str, Any] =random.randint(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 ) # picking a random neighbor
lowerCamelCase : List[str] =neighbors.pop(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Any =picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowerCamelCase : Dict =change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowerCamelCase : Optional[int] =picked_neighbor
else:
lowerCamelCase : Any =(math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
lowerCamelCase : Union[str, Any] =picked_neighbor
lowerCamelCase : Union[str, Any] =current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowerCamelCase : Optional[int] =True
else:
lowerCamelCase : List[str] =next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
snake_case_ = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
snake_case_ = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
snake_case_ = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
snake_case_ = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
return (3 * x**2) - (6 * y)
snake_case_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
snake_case_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F"""{local_min.score()}"""
)
snake_case_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
snake_case_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F"""{local_min.score()}"""
)
| 262 | 0 |
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def __A ( a_ :Any , a_ :Optional[int] , a_ :str , a_ :Optional[Any]=10_24) -> Dict:
__a , __a : int = [], []
__a : Optional[Any] = list(zip(a_ , a_))
__a , __a : int = sorted_examples[0]
def is_too_big(a_ :List[str]):
return tok(a_ , return_tensors='''pt''').input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:]):
__a : int = new_src + ''' ''' + src
__a : Optional[int] = new_tgt + ''' ''' + tgt
if is_too_big(a_) or is_too_big(a_): # cant fit, finalize example
finished_src.append(a_)
finished_tgt.append(a_)
__a , __a : List[str] = src, tgt
else: # can fit, keep adding
__a , __a : Dict = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(a_)
finished_tgt.append(a_)
return finished_src, finished_tgt
def __A ( a_ :Any , a_ :Path , a_ :int , a_ :int) -> Optional[int]:
__a : Tuple = Path(a_)
save_path.mkdir(exist_ok=a_)
for split in ["train"]:
__a , __a : Optional[int] = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
__a : Optional[Any] = [x.rstrip() for x in Path(a_).open().readlines()]
__a : Union[str, Any] = [x.rstrip() for x in Path(a_).open().readlines()]
__a , __a : Optional[int] = pack_examples(a_ , a_ , a_ , a_)
print(F"""packed {split} split from {len(a_)} examples -> {len(a_)}.""")
Path(save_path / F"""{split}.source""").open('''w''').write('''\n'''.join(a_))
Path(save_path / F"""{split}.target""").open('''w''').write('''\n'''.join(a_))
for split in ["val", "test"]:
__a , __a : List[str] = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
shutil.copyfile(a_ , save_path / F"""{split}.source""")
shutil.copyfile(a_ , save_path / F"""{split}.target""")
def __A ( ) -> Dict:
__a : List[str] = argparse.ArgumentParser()
parser.add_argument('''--tok_name''' , type=a_ , help='''like facebook/bart-large-cnn,t5-base, etc.''')
parser.add_argument('''--max_seq_len''' , type=a_ , default=1_28)
parser.add_argument('''--data_dir''' , type=a_)
parser.add_argument('''--save_path''' , type=a_)
__a : Tuple = parser.parse_args()
__a : Union[str, Any] = AutoTokenizer.from_pretrained(args.tok_name)
return pack_data_dir(a_ , Path(args.data_dir) , args.max_seq_len , args.save_path)
if __name__ == "__main__":
packer_cli() | 52 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : Tuple = "▁"
UpperCAmelCase : List[Any] = {"vocab_file": "sentencepiece.bpe.model"}
UpperCAmelCase : int = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
}
}
UpperCAmelCase : Dict = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
UpperCAmelCase : List[Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = ["input_ids", "attention_mask"]
lowercase__ = []
lowercase__ = []
def __init__( self : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int]="<s>" , lowerCAmelCase_ : Union[str, Any]="</s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : int="<unk>" , lowerCAmelCase_ : Union[str, Any]="<pad>" , lowerCAmelCase_ : Dict="<mask>" , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[Dict[str, Any]] = None , lowerCAmelCase_ : List[Any]=None , **lowerCAmelCase_ : Optional[int] , ):
"""simple docstring"""
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else mask_token
lowercase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , src_lang=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(lowerCAmelCase_))
lowercase_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase_ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase_ = 1
lowercase_ = len(self.sp_model)
lowercase_ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase_)
}
lowercase_ = {v: k for k, v in self.lang_code_to_id.items()}
lowercase_ = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
lowercase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase_ = list(self.lang_code_to_id.keys())
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens])
lowercase_ = src_lang if src_lang is not None else """en_XX"""
lowercase_ = self.lang_code_to_id[self._src_lang]
lowercase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self : Dict):
"""simple docstring"""
lowercase_ = self.__dict__.copy()
lowercase_ = None
lowercase_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[str] , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs"""):
lowercase_ = {}
lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def _UpperCAmelCase ( self : str):
"""simple docstring"""
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_)
lowercase_ = [1] * len(self.prefix_tokens)
lowercase_ = [1] * len(self.suffix_tokens)
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase_)) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase_)) + ([0] * len(lowerCAmelCase_)) + suffix_ones
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] , lowerCAmelCase_ : Optional[str] , **lowerCAmelCase_ : Any):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""")
lowercase_ = src_lang
lowercase_ = self(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_)
lowercase_ = self.convert_tokens_to_ids(lowerCAmelCase_)
lowercase_ = tgt_lang_id
return inputs
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = {self.convert_ids_to_tokens(lowerCAmelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : str):
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_)
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase_ = self.sp_model.PieceToId(lowerCAmelCase_)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Tuple):
"""simple docstring"""
lowercase_ = """""".join(lowerCAmelCase_).replace(lowerCAmelCase_ , """ """).strip()
return out_string
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase_ = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCAmelCase_)
elif not os.path.isfile(self.vocab_file):
with open(lowerCAmelCase_ , """wb""") as fi:
lowercase_ = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_)
return (out_vocab_file,)
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str = "en_XX" , lowerCAmelCase_ : Optional[List[str]] = None , lowerCAmelCase_ : str = "ro_RO" , **lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
lowercase_ = src_lang
lowercase_ = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = self.lang_code_to_id[src_lang]
lowercase_ = []
lowercase_ = [self.eos_token_id, self.cur_lang_code]
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = self.lang_code_to_id[lang]
lowercase_ = []
lowercase_ = [self.eos_token_id, self.cur_lang_code]
| 567 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 408 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
lowerCamelCase__ = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
lowerCamelCase__ = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
lowerCamelCase__ = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def lowercase_ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
return float((preds == labels).mean() )
def lowercase_ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
snake_case__ : Tuple =simple_accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
snake_case__ : int =float(fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=SCREAMING_SNAKE_CASE ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowercase_ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
snake_case__ : Dict =float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )
snake_case__ : Dict =float(spearmanr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
elif self.config_name == "stsb":
return pearson_and_spearman(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 408 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class _snake_case ( snake_case__ ):
_A : List[str] = "speech_to_text_2"
_A : Dict = ["past_key_values"]
_A : Dict = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : str ,SCREAMING_SNAKE_CASE__ : Dict=10_000 ,SCREAMING_SNAKE_CASE__ : Tuple=6 ,SCREAMING_SNAKE_CASE__ : int=2_048 ,SCREAMING_SNAKE_CASE__ : Tuple=4 ,SCREAMING_SNAKE_CASE__ : Dict=0.0 ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : Optional[int]="relu" ,SCREAMING_SNAKE_CASE__ : int=256 ,SCREAMING_SNAKE_CASE__ : List[str]=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 ,SCREAMING_SNAKE_CASE__ : str=0.0 ,SCREAMING_SNAKE_CASE__ : Dict=0.02 ,SCREAMING_SNAKE_CASE__ : Tuple=2 ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : List[str]=1 ,SCREAMING_SNAKE_CASE__ : str=0 ,SCREAMING_SNAKE_CASE__ : List[str]=2 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_024 ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ,):
SCREAMING_SNAKE_CASE:List[str] = vocab_size
SCREAMING_SNAKE_CASE:int = d_model
SCREAMING_SNAKE_CASE:Optional[int] = decoder_ffn_dim
SCREAMING_SNAKE_CASE:Optional[Any] = decoder_layers
SCREAMING_SNAKE_CASE:Tuple = decoder_attention_heads
SCREAMING_SNAKE_CASE:Union[str, Any] = dropout
SCREAMING_SNAKE_CASE:Dict = attention_dropout
SCREAMING_SNAKE_CASE:Dict = activation_dropout
SCREAMING_SNAKE_CASE:Dict = activation_function
SCREAMING_SNAKE_CASE:Tuple = init_std
SCREAMING_SNAKE_CASE:Optional[Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE:Dict = use_cache
SCREAMING_SNAKE_CASE:Dict = decoder_layers
SCREAMING_SNAKE_CASE:int = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE:List[str] = max_target_positions
super().__init__(
pad_token_id=__UpperCAmelCase ,bos_token_id=__UpperCAmelCase ,eos_token_id=__UpperCAmelCase ,decoder_start_token_id=__UpperCAmelCase ,**__UpperCAmelCase ,)
| 143 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__A = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''YolosFeatureExtractor''']
__A = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 593 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
A : Optional[int] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
A : Optional[int] = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
A : Tuple = {
"google/electra-small-generator": 5_12,
"google/electra-base-generator": 5_12,
"google/electra-large-generator": 5_12,
"google/electra-small-discriminator": 5_12,
"google/electra-base-discriminator": 5_12,
"google/electra-large-discriminator": 5_12,
}
A : Tuple = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ElectraTokenizer
def __init__( self : List[str] , __magic_name__ : int=None , __magic_name__ : int=None , __magic_name__ : str=True , __magic_name__ : int="[UNK]" , __magic_name__ : Any="[SEP]" , __magic_name__ : str="[PAD]" , __magic_name__ : Optional[Any]="[CLS]" , __magic_name__ : Optional[int]="[MASK]" , __magic_name__ : Dict=True , __magic_name__ : Union[str, Any]=None , **__magic_name__ : Optional[Any] , ) -> List[str]:
super().__init__(
__magic_name__ , tokenizer_file=__magic_name__ , do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , tokenize_chinese_chars=__magic_name__ , strip_accents=__magic_name__ , **__magic_name__ , )
SCREAMING_SNAKE_CASE_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , __magic_name__ ) != do_lower_case
or normalizer_state.get("strip_accents" , __magic_name__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , __magic_name__ ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE_ = getattr(__magic_name__ , normalizer_state.pop("type" ) )
SCREAMING_SNAKE_CASE_ = do_lower_case
SCREAMING_SNAKE_CASE_ = strip_accents
SCREAMING_SNAKE_CASE_ = tokenize_chinese_chars
SCREAMING_SNAKE_CASE_ = normalizer_class(**__magic_name__ )
SCREAMING_SNAKE_CASE_ = do_lower_case
def __A ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int=None ) -> Dict:
SCREAMING_SNAKE_CASE_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self : Optional[Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE_ = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
return tuple(__magic_name__ )
| 356 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : Optional[Any] = logging.get_logger(__name__)
A : List[Any] = torch.device("cpu")
def a__ ( ):
SCREAMING_SNAKE_CASE_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
def a__ ( __UpperCamelCase ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] )
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = dct.pop(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = val
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = []
for k in state_dict.keys():
SCREAMING_SNAKE_CASE_ = k
if ".pwconv" in k:
SCREAMING_SNAKE_CASE_ = k_new.replace(".pwconv" , ".point_wise_conv" )
if ".dwconv" in k:
SCREAMING_SNAKE_CASE_ = k_new.replace(".dwconv" , ".depth_wise_conv" )
if ".Proj." in k:
SCREAMING_SNAKE_CASE_ = k_new.replace(".Proj." , ".proj." )
if "patch_embed" in k_new:
SCREAMING_SNAKE_CASE_ = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
if "network" in k_new:
SCREAMING_SNAKE_CASE_ = k_new.split("." )
if ls[2].isdigit():
SCREAMING_SNAKE_CASE_ = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
else:
SCREAMING_SNAKE_CASE_ = k_new.replace("network" , "swiftformer.encoder.network" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
SCREAMING_SNAKE_CASE_ = 1_0_0_0
SCREAMING_SNAKE_CASE_ = "huggingface/label-files"
SCREAMING_SNAKE_CASE_ = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = idalabel
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
SCREAMING_SNAKE_CASE_ = [3, 3, 6, 4]
SCREAMING_SNAKE_CASE_ = [4_8, 5_6, 1_1_2, 2_2_0]
elif swiftformer_name == "swiftformer_s":
SCREAMING_SNAKE_CASE_ = [3, 3, 9, 6]
SCREAMING_SNAKE_CASE_ = [4_8, 6_4, 1_6_8, 2_2_4]
elif swiftformer_name == "swiftformer_l1":
SCREAMING_SNAKE_CASE_ = [4, 3, 1_0, 5]
SCREAMING_SNAKE_CASE_ = [4_8, 9_6, 1_9_2, 3_8_4]
elif swiftformer_name == "swiftformer_l3":
SCREAMING_SNAKE_CASE_ = [4, 4, 1_2, 6]
SCREAMING_SNAKE_CASE_ = [6_4, 1_2_8, 3_2_0, 5_1_2]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("https" ):
SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location="cpu" , check_hash=__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE_ = torch.load(__UpperCamelCase , map_location="cpu" )
SCREAMING_SNAKE_CASE_ = checkpoint
SCREAMING_SNAKE_CASE_ = create_rename_keys(__UpperCamelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# load HuggingFace model
SCREAMING_SNAKE_CASE_ = SwiftFormerForImageClassification(__UpperCamelCase ).eval()
hf_model.load_state_dict(__UpperCamelCase )
# prepare test inputs
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained("preprocessor_config" )
SCREAMING_SNAKE_CASE_ = processor(images=__UpperCamelCase , return_tensors="pt" )
# compare outputs from both models
SCREAMING_SNAKE_CASE_ = get_expected_output(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = hf_model(inputs["pixel_values"] ).logits
assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
assert torch.allclose(hf_logits[0, 0:5] , __UpperCamelCase , atol=1E-3 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
hf_model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
A : List[Any] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 356 | 1 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase = 10 , _lowerCamelCase = 22 ) -> int:
'''simple docstring'''
_lowerCamelCase : Dict = range(1 , _lowerCamelCase )
_lowerCamelCase : Optional[int] = range(1 , _lowerCamelCase )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''') | 46 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class A ( A_ ):
UpperCamelCase_ : Tuple ='''align_text_model'''
def __init__(self , lowerCAmelCase=3_0_5_2_2 , lowerCAmelCase=7_6_8 , lowerCAmelCase=1_2 , lowerCAmelCase=1_2 , lowerCAmelCase=3_0_7_2 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0 , lowerCAmelCase="absolute" , lowerCAmelCase=True , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
__lowercase= vocab_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_act
__lowercase= intermediate_size
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= initializer_range
__lowercase= layer_norm_eps
__lowercase= position_embedding_type
__lowercase= use_cache
__lowercase= pad_token_id
@classmethod
def _A (cls , lowerCAmelCase , **lowerCAmelCase ):
cls._set_token_in_kwargs(lowerCAmelCase )
__lowercase, __lowercase= cls.get_config_dict(lowerCAmelCase , **lowerCAmelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
__lowercase= config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowerCAmelCase , **lowerCAmelCase )
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''align_vision_model'''
def __init__(self , lowerCAmelCase = 3 , lowerCAmelCase = 6_0_0 , lowerCAmelCase = 2.0 , lowerCAmelCase = 3.1 , lowerCAmelCase = 8 , lowerCAmelCase = [3, 3, 5, 3, 5, 5, 3] , lowerCAmelCase = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , lowerCAmelCase = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , lowerCAmelCase = [] , lowerCAmelCase = [1, 2, 2, 2, 1, 2, 1] , lowerCAmelCase = [1, 2, 2, 3, 3, 4, 1] , lowerCAmelCase = [1, 6, 6, 6, 6, 6, 6] , lowerCAmelCase = 0.25 , lowerCAmelCase = "swish" , lowerCAmelCase = 2_5_6_0 , lowerCAmelCase = "mean" , lowerCAmelCase = 0.02 , lowerCAmelCase = 0.0_01 , lowerCAmelCase = 0.99 , lowerCAmelCase = 0.2 , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
__lowercase= num_channels
__lowercase= image_size
__lowercase= width_coefficient
__lowercase= depth_coefficient
__lowercase= depth_divisor
__lowercase= kernel_sizes
__lowercase= in_channels
__lowercase= out_channels
__lowercase= depthwise_padding
__lowercase= strides
__lowercase= num_block_repeats
__lowercase= expand_ratios
__lowercase= squeeze_expansion_ratio
__lowercase= hidden_act
__lowercase= hidden_dim
__lowercase= pooling_type
__lowercase= initializer_range
__lowercase= batch_norm_eps
__lowercase= batch_norm_momentum
__lowercase= drop_connect_rate
__lowercase= sum(lowerCAmelCase ) * 4
@classmethod
def _A (cls , lowerCAmelCase , **lowerCAmelCase ):
cls._set_token_in_kwargs(lowerCAmelCase )
__lowercase, __lowercase= cls.get_config_dict(lowerCAmelCase , **lowerCAmelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
__lowercase= config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowerCAmelCase , **lowerCAmelCase )
class A ( A_ ):
UpperCamelCase_ : Union[str, Any] ='''align'''
UpperCamelCase_ : List[Any] =True
def __init__(self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=6_4_0 , lowerCAmelCase=1.0 , lowerCAmelCase=0.02 , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
if text_config is None:
__lowercase= {}
logger.info('text_config is None. Initializing the AlignTextConfig with default values.' )
if vision_config is None:
__lowercase= {}
logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.' )
__lowercase= AlignTextConfig(**lowerCAmelCase )
__lowercase= AlignVisionConfig(**lowerCAmelCase )
__lowercase= projection_dim
__lowercase= temperature_init_value
__lowercase= initializer_range
@classmethod
def _A (cls , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCAmelCase )
def _A (self ):
__lowercase= copy.deepcopy(self.__dict__ )
__lowercase= self.text_config.to_dict()
__lowercase= self.vision_config.to_dict()
__lowercase= self.__class__.model_type
return output
| 230 | 0 |
"""simple docstring"""
from math import sqrt
def _UpperCamelCase ( UpperCamelCase ) -> bool:
"""simple docstring"""
assert isinstance(__A , __A ) and (
number >= 0
), "'number' must been an int and positive"
__UpperCAmelCase : int = True
# 0 and 1 are none primes.
if number <= 1:
__UpperCAmelCase : Optional[Any] = False
for divisor in range(2 , int(round(sqrt(__A ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
__UpperCAmelCase : Optional[int] = False
break
# precondition
assert isinstance(__A , __A ), "'status' must been from type bool"
return status
def _UpperCamelCase ( UpperCamelCase ) -> int:
"""simple docstring"""
assert isinstance(__A , __A ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__UpperCAmelCase : Union[str, Any] = list(range(2 , n + 1 ) )
__UpperCAmelCase : str = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(__A ) ):
for j in range(i + 1 , len(__A ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__UpperCAmelCase : str = 0
# filters actual prime numbers.
__UpperCAmelCase : List[Any] = [x for x in begin_list if x != 0]
# precondition
assert isinstance(__A , __A ), "'ans' must been from type list"
return ans
def _UpperCamelCase ( UpperCamelCase ) -> Dict:
"""simple docstring"""
assert isinstance(__A , __A ) and (n > 2), "'N' must been an int and > 2"
__UpperCAmelCase : Optional[int] = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(__A ):
ans.append(__A )
# precondition
assert isinstance(__A , __A ), "'ans' must been from type list"
return ans
def _UpperCamelCase ( UpperCamelCase ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(__A , __A ) and number >= 0, "'number' must been an int and >= 0"
__UpperCAmelCase : Dict = [] # this list will be returns of the function.
# potential prime number factors.
__UpperCAmelCase : Dict = 2
__UpperCAmelCase : Tuple = number
if number == 0 or number == 1:
ans.append(__A )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(__A ):
while quotient != 1:
if is_prime(__A ) and (quotient % factor == 0):
ans.append(__A )
quotient /= factor
else:
factor += 1
else:
ans.append(__A )
# precondition
assert isinstance(__A , __A ), "'ans' must been from type list"
return ans
def _UpperCamelCase ( UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
assert isinstance(__A , __A ) and (
number >= 0
), "'number' bust been an int and >= 0"
__UpperCAmelCase : Union[str, Any] = 0
# prime factorization of 'number'
__UpperCAmelCase : Optional[Any] = prime_factorization(__A )
__UpperCAmelCase : str = max(__A )
# precondition
assert isinstance(__A , __A ), "'ans' must been from type int"
return ans
def _UpperCamelCase ( UpperCamelCase ) -> str:
"""simple docstring"""
assert isinstance(__A , __A ) and (
number >= 0
), "'number' bust been an int and >= 0"
__UpperCAmelCase : List[Any] = 0
# prime factorization of 'number'
__UpperCAmelCase : Tuple = prime_factorization(__A )
__UpperCAmelCase : Tuple = min(__A )
# precondition
assert isinstance(__A , __A ), "'ans' must been from type int"
return ans
def _UpperCamelCase ( UpperCamelCase ) -> Any:
"""simple docstring"""
assert isinstance(__A , __A ), "'number' must been an int"
assert isinstance(number % 2 == 0 , __A ), "compare bust been from type bool"
return number % 2 == 0
def _UpperCamelCase ( UpperCamelCase ) -> str:
"""simple docstring"""
assert isinstance(__A , __A ), "'number' must been an int"
assert isinstance(number % 2 != 0 , __A ), "compare bust been from type bool"
return number % 2 != 0
def _UpperCamelCase ( UpperCamelCase ) -> List[str]:
"""simple docstring"""
assert (
isinstance(__A , __A ) and (number > 2) and is_even(__A )
), "'number' must been an int, even and > 2"
__UpperCAmelCase : List[Any] = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__UpperCAmelCase : Any = get_prime_numbers(__A )
__UpperCAmelCase : Tuple = len(__A )
# run variable for while-loops.
__UpperCAmelCase : str = 0
__UpperCAmelCase : Dict = None
# exit variable. for break up the loops
__UpperCAmelCase : Any = True
while i < len_pn and loop:
__UpperCAmelCase : int = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__UpperCAmelCase : Any = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(__A , __A )
and (len(__A ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> str:
"""simple docstring"""
assert (
isinstance(__A , __A )
and isinstance(__A , __A )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__UpperCAmelCase : List[str] = 0
while numbera != 0:
__UpperCAmelCase : int = numbera % numbera
__UpperCAmelCase : int = numbera
__UpperCAmelCase : Union[str, Any] = rest
# precondition
assert isinstance(__A , __A ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
assert (
isinstance(__A , __A )
and isinstance(__A , __A )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__UpperCAmelCase : Optional[int] = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__UpperCAmelCase : Optional[int] = prime_factorization(__A )
__UpperCAmelCase : Dict = prime_factorization(__A )
elif numbera == 1 or numbera == 1:
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : str = []
__UpperCAmelCase : int = max(__A , __A )
__UpperCAmelCase : int = 0
__UpperCAmelCase : Optional[int] = 0
__UpperCAmelCase : str = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__UpperCAmelCase : Any = prime_fac_a.count(__A )
__UpperCAmelCase : int = prime_fac_a.count(__A )
for _ in range(max(__A , __A ) ):
ans *= n
else:
__UpperCAmelCase : Optional[Any] = prime_fac_a.count(__A )
for _ in range(__A ):
ans *= n
done.append(__A )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__UpperCAmelCase : Union[str, Any] = prime_fac_a.count(__A )
for _ in range(__A ):
ans *= n
done.append(__A )
# precondition
assert isinstance(__A , __A ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def _UpperCamelCase ( UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(__A , __A ) and (n >= 0), "'number' must been a positive int"
__UpperCAmelCase : str = 0
__UpperCAmelCase : str = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(__A ):
ans += 1
# precondition
assert isinstance(__A , __A ) and is_prime(
__A ), "'ans' must been a prime number and from type int"
return ans
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple:
"""simple docstring"""
assert (
is_prime(__A ) and is_prime(__A ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__UpperCAmelCase : Any = p_number_a + 1 # jump to the next number
__UpperCAmelCase : str = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(__A ):
number += 1
while number < p_number_a:
ans.append(__A )
number += 1
# fetch the next prime number.
while not is_prime(__A ):
number += 1
# precondition
assert (
isinstance(__A , __A )
and ans[0] != p_number_a
and ans[len(__A ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def _UpperCamelCase ( UpperCamelCase ) -> List[Any]:
"""simple docstring"""
assert isinstance(__A , __A ) and (n >= 1), "'n' must been int and >= 1"
__UpperCAmelCase : Union[str, Any] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(__A )
# precondition
assert ans[0] == 1 and ans[len(__A ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def _UpperCamelCase ( UpperCamelCase ) -> Any:
"""simple docstring"""
assert isinstance(__A , __A ) and (
number > 1
), "'number' must been an int and >= 1"
__UpperCAmelCase : List[str] = get_divisors(__A )
# precondition
assert (
isinstance(__A , __A )
and (divisors[0] == 1)
and (divisors[len(__A ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> List[Any]:
"""simple docstring"""
assert (
isinstance(__A , __A )
and isinstance(__A , __A )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__UpperCAmelCase : Any = gcd(abs(__A ) , abs(__A ) )
# precondition
assert (
isinstance(__A , __A )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def _UpperCamelCase ( UpperCamelCase ) -> Tuple:
"""simple docstring"""
assert isinstance(__A , __A ) and (n >= 0), "'n' must been a int and >= 0"
__UpperCAmelCase : List[str] = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _UpperCamelCase ( UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
assert isinstance(__A , __A ) and (n >= 0), "'n' must been an int and >= 0"
__UpperCAmelCase : Union[str, Any] = 0
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase : Any = 1 # this will be return
for _ in range(n - 1 ):
__UpperCAmelCase : str = ans
ans += fiba
__UpperCAmelCase : Tuple = tmp
return ans
| 708 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class a__ ( __magic_name__ ):
lowercase_ = ["image_processor", "feature_extractor"]
lowercase_ = "TvltImageProcessor"
lowercase_ = "TvltFeatureExtractor"
def __init__( self : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict):
"""simple docstring"""
super().__init__(image_processor=UpperCamelCase_ , feature_extractor=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = image_processor
__UpperCAmelCase : Dict = feature_extractor
def __call__( self : Tuple , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Optional[int]=False , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : str , ):
"""simple docstring"""
if images is None and audio is None:
raise ValueError("You need to specify either an `images` or `audio` input to process.")
__UpperCAmelCase : Optional[Any] = None
if images is not None:
__UpperCAmelCase : int = self.image_processor(UpperCamelCase_ , mask_pixel=UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_)
if images_mixed is not None:
__UpperCAmelCase : int = self.image_processor(UpperCamelCase_ , is_mixed=UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_)
if audio is not None:
__UpperCAmelCase : List[Any] = self.feature_extractor(
UpperCamelCase_ , *UpperCamelCase_ , sampling_rate=UpperCamelCase_ , mask_audio=UpperCamelCase_ , **UpperCamelCase_)
__UpperCAmelCase : List[str] = {}
if audio is not None:
output_dict.update(UpperCamelCase_)
if images is not None:
output_dict.update(UpperCamelCase_)
if images_mixed_dict is not None:
output_dict.update(UpperCamelCase_)
return output_dict
@property
def a_ ( self : str):
"""simple docstring"""
__UpperCAmelCase : List[Any] = self.image_processor.model_input_names
__UpperCAmelCase : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 487 | 0 |
from __future__ import annotations
def a ( A__ ) -> int:
'''simple docstring'''
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(A__ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(A__ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
'''simple docstring'''
import numpy as np
import datasets
__snake_case: List[str] = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
__snake_case: Optional[Any] = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
__snake_case: List[Any] = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""X""": datasets.Sequence(datasets.Value("""float""" , id="""sequence""" ) , id="""X""" ),
} ) , )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
a_ : str = np.array(lowerCAmelCase_ )
a_ : Optional[Any] = np.array(lowerCAmelCase_ )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("""Expected `X` to be a 2D vector""" )
if len(reference_distribution.shape ) != 2:
raise ValueError("""Expected `reference_distribution` to be a 2D vector""" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"""Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""" )
# Get mahalanobis distance for each prediction
a_ : Dict = X - np.mean(lowerCAmelCase_ )
a_ : Optional[int] = np.cov(reference_distribution.T )
try:
a_ : Any = np.linalg.inv(lowerCAmelCase_ )
except np.linalg.LinAlgError:
a_ : Any = np.linalg.pinv(lowerCAmelCase_ )
a_ : Tuple = np.dot(lowerCAmelCase_ , lowerCAmelCase_ )
a_ : List[str] = np.dot(lowerCAmelCase_ , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 577 | 0 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__a = 1_6
__a = 3_2
def UpperCamelCase_ ( a_ , a_ = 16 ) ->Dict:
A =AutoTokenizer.from_pretrained("bert-base-cased" )
A =load_dataset("glue" , "mrpc" )
def tokenize_function(a_ ):
# max_length=None => use the model max length (it's actually the default)
A =tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=a_ , max_length=a_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A =datasets.map(
a_ , batched=a_ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A =tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(a_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A =16
elif accelerator.mixed_precision != "no":
A =8
else:
A =None
return tokenizer.pad(
a_ , padding="longest" , max_length=a_ , pad_to_multiple_of=a_ , return_tensors="pt" , )
# Instantiate dataloaders.
A =DataLoader(
tokenized_datasets["train"] , shuffle=a_ , collate_fn=a_ , batch_size=a_ )
A =DataLoader(
tokenized_datasets["validation"] , shuffle=a_ , collate_fn=a_ , batch_size=a_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__a = mocked_dataloaders # noqa: F811
def UpperCamelCase_ ( a_ , a_ ) ->List[str]:
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , a_ ) == "1":
A =2
# Initialize accelerator
A =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A =config["lr"]
A =int(config["num_epochs"] )
A =int(config["seed"] )
A =int(config["batch_size"] )
A =evaluate.load("glue" , "mrpc" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=a_ )
def inner_training_loop(a_ ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(a_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A =AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=a_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A =model.to(accelerator.device )
# Instantiate optimizer
A =AdamW(params=model.parameters() , lr=a_ )
A , A =get_dataloaders(a_ , a_ )
# Instantiate scheduler
A =get_linear_schedule_with_warmup(
optimizer=a_ , num_warmup_steps=100 , num_training_steps=(len(a_ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A , A , A , A , A =accelerator.prepare(
a_ , a_ , a_ , a_ , a_ )
# Now we train the model
for epoch in range(a_ ):
model.train()
for step, batch in enumerate(a_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A =model(**a_ )
A =outputs.loss
accelerator.backward(a_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(a_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A =model(**a_ )
A =outputs.logits.argmax(dim=-1 )
A , A =accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=a_ , references=a_ , )
A =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , a_ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def UpperCamelCase_ ( ) ->Union[str, Any]:
A =argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=a_ , default=a_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
A =parser.parse_args()
A ={"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(a_ , a_ )
if __name__ == "__main__":
main()
| 707 |
from __future__ import annotations
def UpperCamelCase_ ( a_ ) ->None:
create_state_space_tree(a_ , [] , 0 , [0 for i in range(len(a_ ) )] )
def UpperCamelCase_ ( a_ , a_ , a_ , a_ , ) ->None:
if index == len(a_ ):
print(a_ )
return
for i in range(len(a_ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
A =True
create_state_space_tree(a_ , a_ , index + 1 , a_ )
current_sequence.pop()
A =False
__a = [3, 1, 2, 4]
generate_all_permutations(sequence)
__a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 689 | 0 |
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __UpperCAmelCase ( A : Union[str, Any] ) -> Optional[int]:
if is_torch_version('''<''' , '''2.0.0''' ) or not hasattr(A , '''_dynamo''' ):
return False
return isinstance(A , torch._dynamo.eval_frame.OptimizedModule )
def __UpperCAmelCase ( A : Tuple , A : bool = True ) -> List[str]:
UpperCAmelCase_ : str = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
UpperCAmelCase_ : Optional[Any] = is_compiled_module(A )
if is_compiled:
UpperCAmelCase_ : Tuple = model
UpperCAmelCase_ : Optional[int] = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(A , A ):
UpperCAmelCase_ : int = model.module
if not keep_fpaa_wrapper:
UpperCAmelCase_ : List[str] = getattr(A , '''forward''' )
UpperCAmelCase_ : Optional[int] = model.__dict__.pop('''_original_forward''' , A )
if original_forward is not None:
while hasattr(A , '''__wrapped__''' ):
UpperCAmelCase_ : Any = forward.__wrapped__
if forward == original_forward:
break
UpperCAmelCase_ : List[str] = forward
if getattr(A , '''_converted_to_transformer_engine''' , A ):
convert_model(A , to_transformer_engine=A )
if is_compiled:
UpperCAmelCase_ : int = model
UpperCAmelCase_ : Optional[int] = compiled_model
return model
def __UpperCAmelCase ( ) -> Optional[Any]:
PartialState().wait_for_everyone()
def __UpperCAmelCase ( A : str , A : Any ) -> Optional[Any]:
if PartialState().distributed_type == DistributedType.TPU:
xm.save(A , A )
elif PartialState().local_process_index == 0:
torch.save(A , A )
@contextmanager
def __UpperCAmelCase ( **A : Optional[Any] ) -> int:
for key, value in kwargs.items():
UpperCAmelCase_ : Optional[Any] = str(A )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __UpperCAmelCase ( A : Tuple ) -> str:
if not hasattr(A , '''__qualname__''' ) and not hasattr(A , '''__name__''' ):
UpperCAmelCase_ : Union[str, Any] = getattr(A , '''__class__''' , A )
if hasattr(A , '''__qualname__''' ):
return obj.__qualname__
if hasattr(A , '''__name__''' ):
return obj.__name__
return str(A )
def __UpperCAmelCase ( A : Optional[int] , A : Union[str, Any] ) -> Any:
for key, value in source.items():
if isinstance(A , A ):
UpperCAmelCase_ : Tuple = destination.setdefault(A , {} )
merge_dicts(A , A )
else:
UpperCAmelCase_ : Any = value
return destination
def __UpperCAmelCase ( A : int = None ) -> bool:
if port is None:
UpperCAmelCase_ : List[str] = 2_9_5_0_0
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0
| 541 |
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_UpperCamelCase : List[str] = logging.getLogger(__name__)
_UpperCamelCase : int = 'pytorch_model.bin'
@dataclasses.dataclass
class snake_case__ :
a_ = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."})
a_ = dataclasses.field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class snake_case__ :
a_ = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
a_ = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
a_ = dataclasses.field(
default=UpperCamelCase , metadata={"help": "A csv or a json file containing the validation data."})
a_ = dataclasses.field(
default=UpperCamelCase , metadata={"help": "The name of the task to train on."} , )
a_ = dataclasses.field(
default=UpperCamelCase , metadata={"help": "The list of labels for the task."})
@dataclasses.dataclass
class snake_case__ :
a_ = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."})
a_ = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."})
a_ = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
a_ = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
a_ = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
a_ = dataclasses.field(
default=UpperCamelCase , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
a_ = dataclasses.field(
default=UpperCamelCase , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
a_ = dataclasses.field(
default=UpperCamelCase , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
a_ = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
a_ = dataclasses.field(
default=100 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
a_ = dataclasses.field(
default=UpperCamelCase , metadata={"help": "Random seed for initialization."} , )
def __UpperCAmelCase ( A : str , A : Optional[Any] , A : List[Any] , A : Any , A : Union[str, Any] , A : Dict ) -> int:
UpperCAmelCase_ : Tuple = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
UpperCAmelCase_ : Tuple = dataset.filter(lambda A : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
UpperCAmelCase_ : Dict = int(eval_result * len(A ) )
print(A )
UpperCAmelCase_ : Optional[int] = dataset.sort('''probability''' , reverse=A )
UpperCAmelCase_ : Optional[Any] = dataset.select(range(A ) )
UpperCAmelCase_ : Dict = dataset.remove_columns(['''label''', '''probability'''] )
UpperCAmelCase_ : List[Any] = dataset.rename_column('''prediction''' , '''label''' )
UpperCAmelCase_ : List[str] = dataset.map(lambda A : {"label": idalabel[example["label"]]} )
UpperCAmelCase_ : Union[str, Any] = dataset.shuffle(seed=args.seed )
UpperCAmelCase_ : str = os.path.join(A , F"train_pseudo.{args.data_file_extension}" )
if args.data_file_extension == "csv":
dataset.to_csv(A , index=A )
else:
dataset.to_json(A )
def __UpperCAmelCase ( A : Any , A : int , A : Union[str, Any] , A : Dict , **A : Any ) -> Dict:
UpperCAmelCase_ : List[str] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
UpperCAmelCase_ : Tuple = STModelArguments(model_name_or_path=A )
UpperCAmelCase_ : int = STDataArguments(train_file=A , infer_file=A )
UpperCAmelCase_ : Optional[Any] = STTrainingArguments(output_dir=A )
UpperCAmelCase_ : Optional[int] = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(A ).items():
setattr(A , A , A )
for key, value in kwargs.items():
if hasattr(A , A ):
setattr(A , A , A )
# Sanity checks
UpperCAmelCase_ : List[str] = {}
UpperCAmelCase_ : Any = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
UpperCAmelCase_ : List[Any] = args.train_file
UpperCAmelCase_ : Optional[int] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
UpperCAmelCase_ : List[str] = args.eval_file
for key in data_files:
UpperCAmelCase_ : Dict = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], F"`{key}_file` should be a csv or a json file."
if args.data_file_extension is None:
UpperCAmelCase_ : Any = extension
else:
assert extension == args.data_file_extension, F"`{key}_file` should be a {args.data_file_extension} file`."
assert (
args.eval_metric in datasets.list_metrics()
), F"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
UpperCAmelCase_ : Any = F"{args.output_dir}/self-train_iter-{{}}".format
UpperCAmelCase_ : Any = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=A )
os.makedirs(A , exist_ok=A )
accelerator.wait_for_everyone()
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : int = False
# Show the progress bar
UpperCAmelCase_ : int = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
UpperCAmelCase_ : str = data_dir_format(A )
assert os.path.exists(A )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
UpperCAmelCase_ : List[Any] = os.path.join(A , '''stage-1''' )
UpperCAmelCase_ : Dict = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(A , A ):
arguments_dict.update({key: value} )
UpperCAmelCase_ : int = os.path.join(A , '''best-checkpoint''' , A )
if os.path.exists(A ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , A , A , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , A )
finetune(**A )
accelerator.wait_for_everyone()
assert os.path.exists(A )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , A )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
UpperCAmelCase_ : Any = os.path.join(A , '''best-checkpoint''' )
UpperCAmelCase_ : Optional[int] = os.path.join(A , '''stage-2''' )
# Update arguments_dict
UpperCAmelCase_ : List[Any] = model_path
UpperCAmelCase_ : Optional[int] = data_files['''train''']
UpperCAmelCase_ : Union[str, Any] = current_output_dir
UpperCAmelCase_ : Optional[int] = os.path.join(A , '''best-checkpoint''' , A )
if os.path.exists(A ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , A , A , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , A )
finetune(**A )
accelerator.wait_for_everyone()
assert os.path.exists(A )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , A )
UpperCAmelCase_ : Dict = iteration
UpperCAmelCase_ : Optional[Any] = data_dir_format(iteration + 1 )
UpperCAmelCase_ : Any = AutoConfig.from_pretrained(os.path.join(A , '''best-checkpoint''' ) )
UpperCAmelCase_ : Optional[Any] = config.idalabel
UpperCAmelCase_ : List[Any] = os.path.join(A , '''eval_results_best-checkpoint.json''' )
UpperCAmelCase_ : Union[str, Any] = os.path.join(A , '''test_results_best-checkpoint.json''' )
assert os.path.exists(A )
with open(A , '''r''' ) as f:
UpperCAmelCase_ : Optional[int] = float(json.load(A )[args.eval_metric] )
UpperCAmelCase_ : List[Any] = os.path.join(A , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(A )
# Loading the dataset from local csv or json files.
UpperCAmelCase_ : str = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
UpperCAmelCase_ : Union[str, Any] = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(A , exist_ok=A )
shutil.copy(A , os.path.join(A , F"eval_results_iter-{iteration}.json" ) )
if os.path.exists(A ):
shutil.copy(A , os.path.join(A , F"test_results_iter-{iteration}.json" ) )
create_pseudo_labeled_data(A , A , A , A , A , A )
accelerator.wait_for_everyone()
UpperCAmelCase_ : int = os.path.join(A , F"train_pseudo.{args.data_file_extension}" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
UpperCAmelCase_ : List[str] = eval_result
if best_iteration is None:
UpperCAmelCase_ : Any = new_iteration
UpperCAmelCase_ : List[Any] = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
UpperCAmelCase_ : str = new_iteration
UpperCAmelCase_ : Optional[int] = new_eval_result
UpperCAmelCase_ : Any = 0
else:
if new_eval_result == best_eval_result:
UpperCAmelCase_ : int = new_iteration
UpperCAmelCase_ : List[Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
UpperCAmelCase_ : List[Any] = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , A )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , A )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A , F"eval_results_iter-{iteration}.json" ) , os.path.join(A , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , A )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A , F"eval_results_iter-{args.max_selftrain_iterations - 1}.json" ) , os.path.join(A , '''eval_results_best-iteration.json''' ) , )
| 541 | 1 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
a_ : str = logging.get_logger(__name__)
def _A (lowerCAmelCase__ :str , lowerCAmelCase__ :Dict ) -> int:
'''simple docstring'''
_a = nn.functional.normalize(lowerCAmelCase__ )
_a = nn.functional.normalize(lowerCAmelCase__ )
return torch.mm(lowerCAmelCase__ , normalized_text_embeds.t() )
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = CLIPConfig
_lowerCAmelCase = ["""CLIPEncoderLayer"""]
def __init__( self , __magic_name__ ) -> Union[str, Any]:
super().__init__(__magic_name__ )
_a = CLIPVisionModel(config.vision_config )
_a = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__magic_name__ )
_a = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=__magic_name__ )
_a = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__magic_name__ )
_a = nn.Parameter(torch.ones(17 ) , requires_grad=__magic_name__ )
_a = nn.Parameter(torch.ones(3 ) , requires_grad=__magic_name__ )
@torch.no_grad()
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> int:
_a = self.vision_model(__magic_name__ )[1] # pooled_output
_a = self.visual_projection(__magic_name__ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_a = cosine_distance(__magic_name__ , self.special_care_embeds ).cpu().float().numpy()
_a = cosine_distance(__magic_name__ , self.concept_embeds ).cpu().float().numpy()
_a = []
_a = image_embeds.shape[0]
for i in range(__magic_name__ ):
_a = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
_a = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
_a = special_cos_dist[i][concept_idx]
_a = self.special_care_embeds_weights[concept_idx].item()
_a = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} )
_a = 0.0_1
for concept_idx in range(len(cos_dist[0] ) ):
_a = cos_dist[i][concept_idx]
_a = self.concept_embeds_weights[concept_idx].item()
_a = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(__magic_name__ )
result.append(__magic_name__ )
_a = [len(res['bad_concepts'] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> Tuple:
_a = self.vision_model(__magic_name__ )[1] # pooled_output
_a = self.visual_projection(__magic_name__ )
_a = cosine_distance(__magic_name__ , self.special_care_embeds )
_a = cosine_distance(__magic_name__ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_a = 0.0
_a = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
_a = torch.any(special_scores > 0 , dim=1 )
_a = special_care * 0.0_1
_a = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
_a = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
_a = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 532 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=13_37 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def _A (lowerCAmelCase__ :SplitDict ) -> Any:
'''simple docstring'''
_a = split_dict._to_yaml_list()
assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ )
_a = SplitDict._from_yaml_list(lowerCAmelCase__ )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
_a = None
# the split name of split_dict takes over the name of the split info object
_a = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'split_info' , [SplitInfo(), SplitInfo(dataset_name=lowerCAmelCase__ ), SplitInfo(dataset_name='my_dataset' )] )
def _A (lowerCAmelCase__ :Optional[Any] ) -> List[str]:
'''simple docstring'''
_a = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 532 | 1 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __UpperCamelCase :
def __init__( self ):
_UpperCAmelCase = ''''''
_UpperCAmelCase = ''''''
_UpperCAmelCase = []
_UpperCAmelCase = 0
_UpperCAmelCase = 256
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 0
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = cva.imread(_UpperCamelCase , 0 )
_UpperCAmelCase = copy.deepcopy(self.img )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' )
_UpperCAmelCase = np.sum(_UpperCamelCase )
for i in range(len(_UpperCamelCase ) ):
_UpperCAmelCase = x[i] / self.k
self.sk += prk
_UpperCAmelCase = (self.L - 1) * self.sk
if self.rem != 0:
_UpperCAmelCase = int(last % last )
_UpperCAmelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(_UpperCamelCase )
_UpperCAmelCase = int(np.ma.count(self.img ) / self.img[1].size )
_UpperCAmelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
_UpperCAmelCase = self.img[j][i]
if num != self.last_list[num]:
_UpperCAmelCase = self.last_list[num]
cva.imwrite('''output_data/output.jpg''' , self.img )
def UpperCamelCase( self ):
plt.hist(self.img.ravel() , 256 , [0, 256] )
def UpperCamelCase( self ):
cva.imshow('''Output-Image''' , self.img )
cva.imshow('''Input-Image''' , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
UpperCAmelCase_ = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
UpperCAmelCase_ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image() | 32 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class __a ( __a ):
'''simple docstring'''
_lowerCamelCase : List[Any] = """codegen"""
_lowerCamelCase : List[str] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , _lowerCamelCase=50_400 , _lowerCamelCase=2_048 , _lowerCamelCase=2_048 , _lowerCamelCase=4_096 , _lowerCamelCase=28 , _lowerCamelCase=16 , _lowerCamelCase=64 , _lowerCamelCase=None , _lowerCamelCase="gelu_new" , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.02 , _lowerCamelCase=True , _lowerCamelCase=50_256 , _lowerCamelCase=50_256 , _lowerCamelCase=False , **_lowerCamelCase , ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = vocab_size
__lowercase = n_ctx
__lowercase = n_positions
__lowercase = n_embd
__lowercase = n_layer
__lowercase = n_head
__lowercase = n_inner
__lowercase = rotary_dim
__lowercase = activation_function
__lowercase = resid_pdrop
__lowercase = embd_pdrop
__lowercase = attn_pdrop
__lowercase = layer_norm_epsilon
__lowercase = initializer_range
__lowercase = use_cache
__lowercase = bos_token_id
__lowercase = eos_token_id
super().__init__(
bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase )
class __a ( __a ):
'''simple docstring'''
def __init__( self , _lowerCamelCase , _lowerCamelCase = "default" , _lowerCamelCase = None , _lowerCamelCase = False , ) -> str:
'''simple docstring'''
super().__init__(_lowerCamelCase , task=_lowerCamelCase , patching_specs=_lowerCamelCase , use_past=_lowerCamelCase )
if not getattr(self._config , "pad_token_id" , _lowerCamelCase ):
# TODO: how to do that better?
__lowercase = 0
@property
def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__lowercase = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(_lowerCamelCase , direction="inputs" )
__lowercase = {0: "batch", 1: "past_sequence + sequence"}
else:
__lowercase = {0: "batch", 1: "sequence"}
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return self._config.n_head
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__lowercase = super(_lowerCamelCase , self ).generate_dummy_inputs(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
# We need to order the input in the way they appears in the forward()
__lowercase = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__lowercase , __lowercase = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__lowercase = seqlen + 2
__lowercase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowercase = [
(torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) for _ in range(self.num_layers )
]
__lowercase = common_inputs["attention_mask"]
if self.use_past:
__lowercase = ordered_inputs["attention_mask"].dtype
__lowercase = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(_lowerCamelCase , _lowerCamelCase , dtype=_lowerCamelCase )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return 13
| 118 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = LEDTokenizer
UpperCAmelCase__ : List[str] = LEDTokenizerFast
UpperCAmelCase__ : str = True
def __lowercase ( self ) -> Optional[Any]:
super().setUp()
_a : int = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_a : str = dict(zip(_a , range(len(_a ) ) ) )
_a : List[Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a : Any = {'''unk_token''': '''<unk>'''}
_a : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
def __lowercase ( self , **_a ) -> Tuple:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , **_a ) -> Any:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , _a ) -> Dict:
return "lower newer", "lower newer"
@cached_property
def __lowercase ( self ) -> Optional[Any]:
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def __lowercase ( self ) -> str:
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def __lowercase ( self ) -> Union[str, Any]:
_a : List[str] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_a : Optional[int] = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a : Dict = tokenizer(_a , max_length=len(_a ) , padding=_a , return_tensors='''pt''' )
self.assertIsInstance(_a , _a )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_a : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(_a , _a )
@require_torch
def __lowercase ( self ) -> Optional[Any]:
_a : int = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a : Optional[int] = tokenizer(_a , padding=_a , return_tensors='''pt''' )
self.assertIn('''input_ids''' , _a )
self.assertIn('''attention_mask''' , _a )
self.assertNotIn('''labels''' , _a )
self.assertNotIn('''decoder_attention_mask''' , _a )
@require_torch
def __lowercase ( self ) -> str:
_a : Tuple = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a : List[Any] = tokenizer(text_target=_a , max_length=3_2 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
@require_torch
def __lowercase ( self ) -> Tuple:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a : str = tokenizer(
['''I am a small frog''' * 1_0_2_4, '''I am a small frog'''] , padding=_a , truncation=_a , return_tensors='''pt''' )
self.assertIsInstance(_a , _a )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def __lowercase ( self ) -> int:
_a : Tuple = ['''A long paragraph for summarization.''']
_a : Optional[Any] = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a : Optional[Any] = tokenizer(_a , return_tensors='''pt''' )
_a : int = tokenizer(text_target=_a , return_tensors='''pt''' )
_a : Any = inputs['''input_ids''']
_a : int = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __lowercase ( self ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a : str = ['''Summary of the text.''', '''Another summary.''']
_a : Any = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_a : Dict = tokenizer(_a , padding=_a )
_a : Optional[int] = [[0] * len(_a ) for x in encoded_output['''input_ids''']]
_a : int = tokenizer.pad(_a )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , _a )
def __lowercase ( self ) -> str:
pass
def __lowercase ( self ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_a : Tuple = self.rust_tokenizer_class.from_pretrained(_a , **_a )
_a : Any = self.tokenizer_class.from_pretrained(_a , **_a )
_a : List[str] = '''A, <mask> AllenNLP sentence.'''
_a : Union[str, Any] = tokenizer_r.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a )
_a : List[Any] = tokenizer_p.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_a : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_a : str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
_a , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
_a , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 578 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
a__ = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
a__ = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
a__ = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> Any:
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
def __lowercase ( self , _a , _a , _a = CHRF.CHAR_ORDER , _a = CHRF.WORD_ORDER , _a = CHRF.BETA , _a = False , _a = False , _a = False , ) -> Union[str, Any]:
_a : int = len(references[0] )
if any(len(_a ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_a : int = [[refs[i] for refs in references] for i in range(_a )]
_a : str = CHRF(_a , _a , _a , _a , _a , _a )
_a : Optional[Any] = sb_chrf.corpus_score(_a , _a )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 578 | 1 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def a ( snake_case__: Dict , snake_case__: Optional[int] ):
'''simple docstring'''
lowercase_ = tmp_path_factory.mktemp('''dset_infos_dir''' )
if "full:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
if "empty:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''''' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f:
f.write('''{"default": {"dataset_size": 42}}''' )
lowercase_ = DatasetInfosDict.from_directory(snake_case__ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def a ( snake_case__: Optional[Any] , snake_case__: DatasetInfo ):
'''simple docstring'''
lowercase_ = str(snake_case__ )
dataset_info.write_to_directory(snake_case__ )
lowercase_ = DatasetInfo.from_directory(snake_case__ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(snake_case__ , '''dataset_info.json''' ) )
def a ( ):
'''simple docstring'''
lowercase_ = DatasetInfo(
description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=1_337 , post_processing_size=442 , dataset_size=1_234 , size_in_bytes=1_337 + 442 + 1_234 , )
lowercase_ = dataset_info._to_yaml_dict()
assert sorted(snake_case__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
lowercase_ = yaml.safe_dump(snake_case__ )
lowercase_ = yaml.safe_load(snake_case__ )
assert dataset_info_yaml_dict == reloaded
def a ( ):
'''simple docstring'''
lowercase_ = DatasetInfo()
lowercase_ = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=1_337 ),
} ),
] , )
def a ( snake_case__: Optional[Any] , snake_case__: DatasetInfosDict ):
'''simple docstring'''
lowercase_ = str(snake_case__ )
dataset_infos_dict.write_to_directory(snake_case__ )
lowercase_ = DatasetInfosDict.from_directory(snake_case__ )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
lowercase_ = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
lowercase_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(snake_case__ , '''README.md''' ) )
| 97 |
import os
# Precomputes a list of the 100 first triangular numbers
__a = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def a ( ):
'''simple docstring'''
lowercase_ = os.path.dirname(os.path.realpath(snake_case__ ) )
lowercase_ = os.path.join(snake_case__ , '''words.txt''' )
lowercase_ = ''''''
with open(snake_case__ ) as f:
lowercase_ = f.readline()
lowercase_ = [word.strip('''"''' ) for word in words.strip('''\r\n''' ).split(''',''' )]
lowercase_ = [
word
for word in [sum(ord(snake_case__ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(snake_case__ )
if __name__ == "__main__":
print(solution())
| 97 | 1 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class UpperCamelCase__ ( UpperCAmelCase__):
'''simple docstring'''
def __lt__( self , A ) ->Tuple:
return self[-1] < other[-1]
def __eq__( self , A ) ->List[Any]:
return self[-1] == other[-1]
def A ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase__ :list[Stack] = []
# sort into stacks
for element in collection:
UpperCAmelCase__ :List[Any] = Stack([element] )
UpperCAmelCase__ :Optional[int] = bisect_left(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if i != len(SCREAMING_SNAKE_CASE ):
stacks[i].append(SCREAMING_SNAKE_CASE )
else:
stacks.append(SCREAMING_SNAKE_CASE )
# use a heap-based merge to merge stack efficiently
UpperCAmelCase__ :Optional[Any] = merge(*(reversed(SCREAMING_SNAKE_CASE ) for stack in stacks) )
return collection
if __name__ == "__main__":
__snake_case : Any = input('Enter numbers separated by a comma:\n').strip()
__snake_case : int = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
| 433 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class UpperCamelCase__ ( UpperCAmelCase__):
'''simple docstring'''
def A__ ( self ) ->Union[str, Any]:
UpperCAmelCase__ :List[Any] = tempfile.mkdtemp()
UpperCAmelCase__ :Union[str, Any] = 8
# DPR tok
UpperCAmelCase__ :List[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
UpperCAmelCase__ :str = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(A , exist_ok=A )
UpperCAmelCase__ :Tuple = os.path.join(A , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
UpperCAmelCase__ :Optional[Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
UpperCAmelCase__ :List[Any] = dict(zip(A , range(len(A ) ) ) )
UpperCAmelCase__ :Dict = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
UpperCAmelCase__ :Optional[Any] = {'unk_token': '<unk>'}
UpperCAmelCase__ :int = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(A , exist_ok=A )
UpperCAmelCase__ :Any = os.path.join(A , BART_VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase__ :Optional[int] = os.path.join(A , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(A ) )
def A__ ( self ) ->DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def A__ ( self ) ->DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def A__ ( self ) ->BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def A__ ( self ) ->Dict:
shutil.rmtree(self.tmpdirname )
def A__ ( self ) ->Any:
UpperCAmelCase__ :int = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def A__ ( self ) ->List[Any]:
UpperCAmelCase__ :Any = self.get_dummy_dataset()
UpperCAmelCase__ :Dict = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
UpperCAmelCase__ :int = dataset
UpperCAmelCase__ :List[str] = RagRetriever(
A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def A__ ( self , A ) ->List[Any]:
UpperCAmelCase__ :Optional[Any] = self.get_dummy_dataset()
UpperCAmelCase__ :Optional[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , )
if from_disk:
UpperCAmelCase__ :Union[str, Any] = os.path.join(self.tmpdirname , 'dataset' )
UpperCAmelCase__ :int = os.path.join(self.tmpdirname , 'index.faiss' )
dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) )
dataset.drop_index('embeddings' )
dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) )
del dataset
UpperCAmelCase__ :Optional[Any] = RagRetriever(
A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
UpperCAmelCase__ :List[str] = RagRetriever(
A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , A ) , )
return retriever
def A__ ( self ) ->str:
UpperCAmelCase__ :Union[str, Any] = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
UpperCAmelCase__ :Optional[int] = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' )
dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' )
pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) )
UpperCAmelCase__ :int = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' )
UpperCAmelCase__ :List[str] = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
pickle.dump(A , open(A , 'wb' ) )
UpperCAmelCase__ :List[str] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , )
UpperCAmelCase__ :Dict = RagRetriever(
A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def A__ ( self ) ->Optional[Any]:
UpperCAmelCase__ :Optional[int] = 1
UpperCAmelCase__ :Tuple = self.get_dummy_canonical_hf_index_retriever()
UpperCAmelCase__ :List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :int = retriever.retrieve(A , n_docs=A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A__ ( self ) ->Dict:
UpperCAmelCase__ :List[Any] = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
UpperCAmelCase__ :List[str] = self.get_dummy_dataset()
retriever.save_pretrained(A )
UpperCAmelCase__ :List[Any] = RagRetriever.from_pretrained(A )
self.assertIsInstance(A , A )
UpperCAmelCase__ :Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ :List[Any] = retriever.retrieve(A , n_docs=1 )
self.assertTrue(out is not None )
def A__ ( self ) ->Tuple:
UpperCAmelCase__ :Tuple = 1
UpperCAmelCase__ :Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=A )
UpperCAmelCase__ :Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :List[str] = retriever.retrieve(A , n_docs=A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A__ ( self ) ->Tuple:
UpperCAmelCase__ :str = self.get_dummy_custom_hf_index_retriever(from_disk=A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(A )
UpperCAmelCase__ :List[Any] = RagRetriever.from_pretrained(A )
self.assertIsInstance(A , A )
UpperCAmelCase__ :int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ :str = retriever.retrieve(A , n_docs=1 )
self.assertTrue(out is not None )
def A__ ( self ) ->Tuple:
UpperCAmelCase__ :Any = 1
UpperCAmelCase__ :Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=A )
UpperCAmelCase__ :Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :Any = retriever.retrieve(A , n_docs=A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A__ ( self ) ->Dict:
UpperCAmelCase__ :Optional[int] = self.get_dummy_custom_hf_index_retriever(from_disk=A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(A )
UpperCAmelCase__ :int = RagRetriever.from_pretrained(A )
self.assertIsInstance(A , A )
UpperCAmelCase__ :Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ :Union[str, Any] = retriever.retrieve(A , n_docs=1 )
self.assertTrue(out is not None )
def A__ ( self ) ->List[str]:
UpperCAmelCase__ :str = 1
UpperCAmelCase__ :List[str] = self.get_dummy_legacy_index_retriever()
UpperCAmelCase__ :str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :Dict = retriever.retrieve(A , n_docs=A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] )
self.assertEqual(len(doc_dicts[0]['text'] ) , A )
self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A__ ( self ) ->List[str]:
UpperCAmelCase__ :Any = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(A )
UpperCAmelCase__ :str = RagRetriever.from_pretrained(A )
self.assertIsInstance(A , A )
UpperCAmelCase__ :List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ :int = retriever.retrieve(A , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def A__ ( self ) ->List[str]:
import torch
UpperCAmelCase__ :Optional[Any] = 1
UpperCAmelCase__ :Optional[int] = self.get_dummy_canonical_hf_index_retriever()
UpperCAmelCase__ :Union[str, Any] = [[5, 7], [10, 11]]
UpperCAmelCase__ :Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ :List[str] = retriever(A , A , prefix=retriever.config.generator.prefix , n_docs=A )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :List[Any] = (
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(A , A )
self.assertIsInstance(A , A )
self.assertIsInstance(A , np.ndarray )
UpperCAmelCase__ :List[str] = retriever(
A , A , prefix=retriever.config.generator.prefix , n_docs=A , return_tensors='pt' , )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :List[str] = ( # noqa: F841
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
out['doc_ids'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(A , torch.Tensor )
self.assertIsInstance(A , torch.Tensor )
self.assertIsInstance(A , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def A__ ( self ) ->List[Any]:
UpperCAmelCase__ :Tuple = self.get_dpr_ctx_encoder_tokenizer()
UpperCAmelCase__ :Tuple = 1
UpperCAmelCase__ :Optional[int] = self.get_dummy_custom_hf_index_retriever(from_disk=A )
retriever.set_ctx_encoder_tokenizer(A )
UpperCAmelCase__ :Tuple = [[5, 7], [10, 11]]
UpperCAmelCase__ :Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ :Any = retriever(A , A , prefix=retriever.config.generator.prefix , n_docs=A )
self.assertEqual(
len(A ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , A ) # check for doc token related keys in dictionary.
| 433 | 1 |
import math
def a ( snake_case__: float , snake_case__: float ):
'''simple docstring'''
return math.pow(snake_case__ , 2 ) - a
def a ( snake_case__: float ):
'''simple docstring'''
return 2 * x
def a ( snake_case__: float ):
'''simple docstring'''
lowercase_ = 2.0
while start <= a:
lowercase_ = math.pow(snake_case__ , 2 )
return start
def a ( snake_case__: float , snake_case__: int = 9_999 , snake_case__: float = 0.0_0_0_0_0_0_0_0_0_0_0_0_0_1 ):
'''simple docstring'''
if a < 0:
raise ValueError('''math domain error''' )
lowercase_ = get_initial_point(snake_case__ )
for _ in range(snake_case__ ):
lowercase_ = value
lowercase_ = value - fx(snake_case__ , snake_case__ ) / fx_derivative(snake_case__ )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
| 97 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCamelCase : Tuple = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 184 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
snake_case = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 568 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def _A ( self : List[str] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _A ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = 1
lowerCAmelCase__ : Union[str, Any] = 3
lowerCAmelCase__ : Any = (32, 32)
lowerCAmelCase__ : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(a__ )
return image
@property
def _A ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _A ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _A ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(a__ )
@property
def _A ( self : int ):
'''simple docstring'''
def extract(*a__ : List[Any] , **a__ : int ):
class lowerCAmelCase :
def __init__( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = torch.ones([0] )
def _A ( self : str , a__ : str ):
'''simple docstring'''
self.pixel_values.to(a__ )
return self
return Out()
return extract
def _A ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : Union[str, Any] = self.dummy_cond_unet
lowerCAmelCase__ : Union[str, Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=a__ , set_alpha_to_one=a__ , )
lowerCAmelCase__ : Optional[Any] = self.dummy_vae
lowerCAmelCase__ : Tuple = self.dummy_text_encoder
lowerCAmelCase__ : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ : List[Any] = StableDiffusionPipeline(
unet=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , safety_checker=a__ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase__ : Dict = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
lowerCAmelCase__ : List[str] = "A painting of a squirrel eating a burger"
lowerCAmelCase__ : str = torch.Generator(device=a__ ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = sd_pipe([prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : Union[str, Any] = torch.Generator(device=a__ ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = sd_pipe(
[prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=a__ , )[0]
lowerCAmelCase__ : Any = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Any = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _A ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : int = self.dummy_cond_unet
lowerCAmelCase__ : Optional[int] = PNDMScheduler(skip_prk_steps=a__ )
lowerCAmelCase__ : Optional[int] = self.dummy_vae
lowerCAmelCase__ : Tuple = self.dummy_text_encoder
lowerCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ : List[str] = StableDiffusionPipeline(
unet=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , safety_checker=a__ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase__ : Optional[int] = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
lowerCAmelCase__ : Tuple = "A painting of a squirrel eating a burger"
lowerCAmelCase__ : str = torch.Generator(device=a__ ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = sd_pipe([prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
lowerCAmelCase__ : Union[str, Any] = output.images
lowerCAmelCase__ : Tuple = torch.Generator(device=a__ ).manual_seed(0 )
lowerCAmelCase__ : Tuple = sd_pipe(
[prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=a__ , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Optional[Any] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _A ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : str = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=a__ )
assert isinstance(a__ , a__ )
assert isinstance(pipe.scheduler , a__ )
assert pipe.safety_checker is None
lowerCAmelCase__ : List[Any] = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a__ )
lowerCAmelCase__ : List[Any] = StableDiffusionPipeline.from_pretrained(a__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCAmelCase__ : List[Any] = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _A ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.dummy_cond_unet
lowerCAmelCase__ : Tuple = PNDMScheduler(skip_prk_steps=a__ )
lowerCAmelCase__ : Dict = self.dummy_vae
lowerCAmelCase__ : Optional[int] = self.dummy_text_encoder
lowerCAmelCase__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
lowerCAmelCase__ : List[Any] = unet.half()
lowerCAmelCase__ : Dict = vae.half()
lowerCAmelCase__ : List[str] = bert.half()
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ : List[str] = StableDiffusionPipeline(
unet=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , safety_checker=a__ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase__ : str = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
lowerCAmelCase__ : Dict = "A painting of a squirrel eating a burger"
lowerCAmelCase__ : Any = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def _A ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=a__ )
lowerCAmelCase__ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCAmelCase__ : Tuple = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
lowerCAmelCase__ : Any = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
lowerCAmelCase__ : List[str] = 40_0366_0346
lowerCAmelCase__ : int = 7
# without safety guidance (sld_guidance_scale = 0)
lowerCAmelCase__ : Dict = torch.manual_seed(a__ )
lowerCAmelCase__ : List[str] = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
lowerCAmelCase__ : List[Any] = output.images
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Union[str, Any] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
lowerCAmelCase__ : Tuple = torch.manual_seed(a__ )
lowerCAmelCase__ : Tuple = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase__ : List[Any] = output.images
lowerCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : List[str] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _A ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=a__ )
lowerCAmelCase__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCAmelCase__ : Optional[Any] = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
lowerCAmelCase__ : int = "padme amidala taking a bath artwork, safe for work, no nudity"
lowerCAmelCase__ : Union[str, Any] = 27_3497_1755
lowerCAmelCase__ : str = 7
lowerCAmelCase__ : Any = torch.manual_seed(a__ )
lowerCAmelCase__ : Dict = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : Any = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Optional[Any] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
lowerCAmelCase__ : Optional[int] = torch.manual_seed(a__ )
lowerCAmelCase__ : Dict = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase__ : Optional[int] = output.images
lowerCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Union[str, Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _A ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
lowerCAmelCase__ : List[str] = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
lowerCAmelCase__ : Dict = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
lowerCAmelCase__ : Union[str, Any] = 10_4435_5234
lowerCAmelCase__ : Optional[int] = 12
lowerCAmelCase__ : Tuple = torch.manual_seed(a__ )
lowerCAmelCase__ : str = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
lowerCAmelCase__ : Union[str, Any] = output.images
lowerCAmelCase__ : int = image[0, -3:, -3:, -1]
lowerCAmelCase__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
lowerCAmelCase__ : List[Any] = torch.manual_seed(a__ )
lowerCAmelCase__ : List[Any] = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase__ : List[Any] = output.images
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 568 | 1 |
_lowerCamelCase : str = 'Tobias Carryer'
from time import time
class snake_case__ :
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any=int(time() ) ) -> int: # noqa: B008
UpperCAmelCase_ = multiplier
UpperCAmelCase_ = increment
UpperCAmelCase_ = modulo
UpperCAmelCase_ = seed
def UpperCamelCase ( self : str ) -> int:
UpperCAmelCase_ = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
_lowerCamelCase : str = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
| 121 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCamelCase : Optional[Any] = 16
_lowerCamelCase : List[Any] = 32
def _lowerCAmelCase ( __magic_name__ :Accelerator , __magic_name__ :int = 1_6 ):
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase_ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__magic_name__ :int ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__magic_name__ , max_length=__magic_name__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase_ = datasets.map(
__magic_name__ , batched=__magic_name__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__magic_name__ :List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase_ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase_ = 1_6
elif accelerator.mixed_precision != "no":
UpperCAmelCase_ = 8
else:
UpperCAmelCase_ = None
return tokenizer.pad(
__magic_name__ , padding='''longest''' , max_length=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_tensors='''pt''' , )
# Instantiate dataloaders.
UpperCAmelCase_ = DataLoader(
tokenized_datasets['''train'''] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ , drop_last=__magic_name__ )
UpperCAmelCase_ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ , drop_last=(accelerator.mixed_precision == '''fp8''') , )
return train_dataloader, eval_dataloader
def _lowerCAmelCase ( __magic_name__ :Tuple , __magic_name__ :List[Any] ):
# Initialize accelerator
UpperCAmelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ = config['''lr''']
UpperCAmelCase_ = int(config['''num_epochs'''] )
UpperCAmelCase_ = int(config['''seed'''] )
UpperCAmelCase_ = int(config['''batch_size'''] )
UpperCAmelCase_ = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase_ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase_ = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase_ = MAX_GPU_BATCH_SIZE
set_seed(__magic_name__ )
UpperCAmelCase_, UpperCAmelCase_ = get_dataloaders(__magic_name__ , __magic_name__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__magic_name__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase_ = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase_ = AdamW(params=model.parameters() , lr=__magic_name__ )
# Instantiate scheduler
UpperCAmelCase_ = get_linear_schedule_with_warmup(
optimizer=__magic_name__ , num_warmup_steps=1_0_0 , num_training_steps=(len(__magic_name__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ = accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Now we train the model
for epoch in range(__magic_name__ ):
model.train()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase_ = model(**__magic_name__ )
UpperCAmelCase_ = outputs.loss
UpperCAmelCase_ = loss / gradient_accumulation_steps
accelerator.backward(__magic_name__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ = model(**__magic_name__ )
UpperCAmelCase_ = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_, UpperCAmelCase_ = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__magic_name__ , references=__magic_name__ , )
UpperCAmelCase_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __magic_name__ )
def _lowerCAmelCase ( ):
UpperCAmelCase_ = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__magic_name__ , default=__magic_name__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6}
training_function(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
main()
| 121 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A__ : Dict = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int] = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
A__ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 660 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A__ : Tuple = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : nn.ModuleList , lowerCAmelCase : nn.ModuleList , lowerCAmelCase : List[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase ) == len(lowerCAmelCase ), F"{len(lowerCAmelCase )} != {len(lowerCAmelCase )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
A__ : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A__ : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
'''simple docstring'''
try:
UpperCAmelCase__ : Tuple = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
F" {n_student}" )
return list(range(lowerCAmelCase ) )
def a__ ( lowerCAmelCase : int , lowerCAmelCase : Tuple ):
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(F"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
return list(range(lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def a__ ( lowerCAmelCase : Union[str, PreTrainedModel] , lowerCAmelCase : Union[str, Path] = "student" , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : List[str]=False , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase , lowerCAmelCase ):
AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase ) # purely for convenience
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).eval()
else:
assert isinstance(lowerCAmelCase , lowerCAmelCase ), F"teacher must be a model or string got type {type(lowerCAmelCase )}"
UpperCAmelCase__ : int = teacher.config.to_diff_dict()
try:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCAmelCase__ : Tuple = teacher_e
if d is None:
UpperCAmelCase__ : str = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCAmelCase__ : Optional[Any] = teacher_e
if d is None:
UpperCAmelCase__ : Optional[Any] = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase )
# Copy weights
UpperCAmelCase__ : Tuple = teacher.config_class(**lowerCAmelCase )
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
UpperCAmelCase__ : Optional[int] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCAmelCase__ , UpperCAmelCase__ : int = list(range(lowerCAmelCase ) ), list(range(lowerCAmelCase ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
student.save_pretrained(lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
if d_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
try:
if hasattr(
lowerCAmelCase , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
UpperCAmelCase__ : int = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 660 | 1 |
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
__lowercase : Optional[int] = len(__UpperCamelCase )
__lowercase : Dict = len(__UpperCamelCase )
__lowercase : int = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
__lowercase : Dict = True
for i in range(__UpperCamelCase ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
__lowercase : Union[str, Any] = True
if a[i].islower():
__lowercase : List[str] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 76 |
from __future__ import annotations
def _UpperCAmelCase ( UpperCAmelCase : str , UpperCAmelCase : str ):
"""simple docstring"""
__lowerCamelCase : int = get_failure_array(UpperCAmelCase )
# 2) Step through text searching for pattern
__lowerCamelCase , __lowerCamelCase : Any = 0, 0 # index into text, pattern
while i < len(UpperCAmelCase ):
if pattern[j] == text[i]:
if j == (len(UpperCAmelCase ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
__lowerCamelCase : Optional[Any] = failure[j - 1]
continue
i += 1
return False
def _UpperCAmelCase ( UpperCAmelCase : str ):
"""simple docstring"""
__lowerCamelCase : List[str] = [0]
__lowerCamelCase : Dict = 0
__lowerCamelCase : List[Any] = 1
while j < len(UpperCAmelCase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
__lowerCamelCase : Any = failure[i - 1]
continue
j += 1
failure.append(UpperCAmelCase )
return failure
if __name__ == "__main__":
# Test 1)
__UpperCamelCase : List[str] = 'abc1abc12'
__UpperCamelCase : Dict = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
__UpperCamelCase : Optional[int] = 'alskfjaldsk23adsfabcabc'
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__UpperCamelCase : Any = 'ABABX'
__UpperCamelCase : List[str] = 'ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
__UpperCamelCase : List[Any] = 'AAAB'
__UpperCamelCase : Any = 'ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
__UpperCamelCase : Union[str, Any] = 'abcdabcy'
__UpperCamelCase : Tuple = 'abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
__UpperCamelCase : Union[str, Any] = 'aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 519 | 0 |
def A_ ( lowercase_ = 1_0_0_0 ) ->Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
SCREAMING_SNAKE_CASE = (n * n - 2 * a * n) // (2 * n - 2 * a)
SCREAMING_SNAKE_CASE = n - a - b
if c * c == (a * a + b * b):
SCREAMING_SNAKE_CASE = a * b * c
if candidate >= product:
SCREAMING_SNAKE_CASE = candidate
return product
if __name__ == "__main__":
print(f'{solution() = }')
| 721 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
__UpperCAmelCase = [
# (stable-diffusion, HF Diffusers)
("time_embed.0.weight", "time_embedding.linear_1.weight"),
("time_embed.0.bias", "time_embedding.linear_1.bias"),
("time_embed.2.weight", "time_embedding.linear_2.weight"),
("time_embed.2.bias", "time_embedding.linear_2.bias"),
("input_blocks.0.0.weight", "conv_in.weight"),
("input_blocks.0.0.bias", "conv_in.bias"),
("out.0.weight", "conv_norm_out.weight"),
("out.0.bias", "conv_norm_out.bias"),
("out.2.weight", "conv_out.weight"),
("out.2.bias", "conv_out.bias"),
]
__UpperCAmelCase = [
# (stable-diffusion, HF Diffusers)
("in_layers.0", "norm1"),
("in_layers.2", "conv1"),
("out_layers.0", "norm2"),
("out_layers.3", "conv2"),
("emb_layers.1", "time_emb_proj"),
("skip_connection", "conv_shortcut"),
]
__UpperCAmelCase = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
__UpperCAmelCase = f'down_blocks.{i}.resnets.{j}.'
__UpperCAmelCase = f'input_blocks.{3*i + j + 1}.0.'
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
__UpperCAmelCase = f'down_blocks.{i}.attentions.{j}.'
__UpperCAmelCase = f'input_blocks.{3*i + j + 1}.1.'
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
__UpperCAmelCase = f'up_blocks.{i}.resnets.{j}.'
__UpperCAmelCase = f'output_blocks.{3*i + j}.0.'
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
__UpperCAmelCase = f'up_blocks.{i}.attentions.{j}.'
__UpperCAmelCase = f'output_blocks.{3*i + j}.1.'
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
__UpperCAmelCase = f'down_blocks.{i}.downsamplers.0.conv.'
__UpperCAmelCase = f'input_blocks.{3*(i+1)}.0.op.'
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
__UpperCAmelCase = f'up_blocks.{i}.upsamplers.0.'
__UpperCAmelCase = f'output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
__UpperCAmelCase = "mid_block.attentions.0."
__UpperCAmelCase = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
__UpperCAmelCase = f'mid_block.resnets.{j}.'
__UpperCAmelCase = f'middle_block.{2*j}.'
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def A_ ( lowercase_ ) ->str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
SCREAMING_SNAKE_CASE = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
SCREAMING_SNAKE_CASE = v.replace(lowercase_ , lowercase_ )
SCREAMING_SNAKE_CASE = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
SCREAMING_SNAKE_CASE = v.replace(lowercase_ , lowercase_ )
SCREAMING_SNAKE_CASE = v
SCREAMING_SNAKE_CASE = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
__UpperCAmelCase = [
# (stable-diffusion, HF Diffusers)
("nin_shortcut", "conv_shortcut"),
("norm_out", "conv_norm_out"),
("mid.attn_1.", "mid_block.attentions.0."),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
__UpperCAmelCase = f'encoder.down_blocks.{i}.resnets.{j}.'
__UpperCAmelCase = f'encoder.down.{i}.block.{j}.'
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
__UpperCAmelCase = f'down_blocks.{i}.downsamplers.0.'
__UpperCAmelCase = f'down.{i}.downsample.'
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
__UpperCAmelCase = f'up_blocks.{i}.upsamplers.0.'
__UpperCAmelCase = f'up.{3-i}.upsample.'
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
__UpperCAmelCase = f'decoder.up_blocks.{i}.resnets.{j}.'
__UpperCAmelCase = f'decoder.up.{3-i}.block.{j}.'
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
__UpperCAmelCase = f'mid_block.resnets.{i}.'
__UpperCAmelCase = f'mid.block_{i+1}.'
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
__UpperCAmelCase = [
# (stable-diffusion, HF Diffusers)
("norm.", "group_norm."),
("q.", "query."),
("k.", "key."),
("v.", "value."),
("proj_out.", "proj_attn."),
]
def A_ ( lowercase_ ) ->Tuple:
"""simple docstring"""
return w.reshape(*w.shape , 1 , 1 )
def A_ ( lowercase_ ) ->Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
SCREAMING_SNAKE_CASE = v.replace(lowercase_ , lowercase_ )
SCREAMING_SNAKE_CASE = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
SCREAMING_SNAKE_CASE = v.replace(lowercase_ , lowercase_ )
SCREAMING_SNAKE_CASE = v
SCREAMING_SNAKE_CASE = {v: vae_state_dict[k] for k, v in mapping.items()}
SCREAMING_SNAKE_CASE = ['q', 'k', 'v', 'proj_out']
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f'''mid.attn_1.{weight_name}.weight''' in k:
print(f'''Reshaping {k} for SD format''' )
SCREAMING_SNAKE_CASE = reshape_weight_for_sd(lowercase_ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
__UpperCAmelCase = [
# (stable-diffusion, HF Diffusers)
("resblocks.", "text_model.encoder.layers."),
("ln_1", "layer_norm1"),
("ln_2", "layer_norm2"),
(".c_fc.", ".fc1."),
(".c_proj.", ".fc2."),
(".attn", ".self_attn"),
("ln_final.", "transformer.text_model.final_layer_norm."),
("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
__UpperCAmelCase = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
__UpperCAmelCase = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
__UpperCAmelCase = {"q": 0, "k": 1, "v": 2}
def A_ ( lowercase_ ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = {}
for k, v in text_enc_dict.items():
if (
k.endswith('.self_attn.q_proj.weight' )
or k.endswith('.self_attn.k_proj.weight' )
or k.endswith('.self_attn.v_proj.weight' )
):
SCREAMING_SNAKE_CASE = k[: -len('.q_proj.weight' )]
SCREAMING_SNAKE_CASE = k[-len('q_proj.weight' )]
if k_pre not in capture_qkv_weight:
SCREAMING_SNAKE_CASE = [None, None, None]
SCREAMING_SNAKE_CASE = v
continue
if (
k.endswith('.self_attn.q_proj.bias' )
or k.endswith('.self_attn.k_proj.bias' )
or k.endswith('.self_attn.v_proj.bias' )
):
SCREAMING_SNAKE_CASE = k[: -len('.q_proj.bias' )]
SCREAMING_SNAKE_CASE = k[-len('q_proj.bias' )]
if k_pre not in capture_qkv_bias:
SCREAMING_SNAKE_CASE = [None, None, None]
SCREAMING_SNAKE_CASE = v
continue
SCREAMING_SNAKE_CASE = textenc_pattern.sub(lambda lowercase_ : protected[re.escape(m.group(0 ) )] , lowercase_ )
SCREAMING_SNAKE_CASE = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
SCREAMING_SNAKE_CASE = textenc_pattern.sub(lambda lowercase_ : protected[re.escape(m.group(0 ) )] , lowercase_ )
SCREAMING_SNAKE_CASE = torch.cat(lowercase_ )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
SCREAMING_SNAKE_CASE = textenc_pattern.sub(lambda lowercase_ : protected[re.escape(m.group(0 ) )] , lowercase_ )
SCREAMING_SNAKE_CASE = torch.cat(lowercase_ )
return new_state_dict
def A_ ( lowercase_ ) ->Dict:
"""simple docstring"""
return text_enc_dict
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
)
__UpperCAmelCase = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
__UpperCAmelCase = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
__UpperCAmelCase = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
__UpperCAmelCase = osp.join(args.model_path, "text_encoder", "model.safetensors")
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
__UpperCAmelCase = load_file(unet_path, device="cpu")
else:
__UpperCAmelCase = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
__UpperCAmelCase = torch.load(unet_path, map_location="cpu")
if osp.exists(vae_path):
__UpperCAmelCase = load_file(vae_path, device="cpu")
else:
__UpperCAmelCase = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
__UpperCAmelCase = torch.load(vae_path, map_location="cpu")
if osp.exists(text_enc_path):
__UpperCAmelCase = load_file(text_enc_path, device="cpu")
else:
__UpperCAmelCase = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
__UpperCAmelCase = torch.load(text_enc_path, map_location="cpu")
# Convert the UNet model
__UpperCAmelCase = convert_unet_state_dict(unet_state_dict)
__UpperCAmelCase = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
__UpperCAmelCase = convert_vae_state_dict(vae_state_dict)
__UpperCAmelCase = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
__UpperCAmelCase = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
__UpperCAmelCase = {"transformer." + k: v for k, v in text_enc_dict.items()}
__UpperCAmelCase = convert_text_enc_state_dict_vaa(text_enc_dict)
__UpperCAmelCase = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
else:
__UpperCAmelCase = convert_text_enc_state_dict(text_enc_dict)
__UpperCAmelCase = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
__UpperCAmelCase = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
__UpperCAmelCase = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
__UpperCAmelCase = {"state_dict": state_dict}
torch.save(state_dict, args.checkpoint_path)
| 259 | 0 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def lowerCamelCase__ ( __A :str ):
"""simple docstring"""
__snake_case = checkpoints.load_tax_checkpoint(__A )
__snake_case = flatten_dict(__A )
return flax_params
def lowerCamelCase__ ( __A :Dict ):
"""simple docstring"""
__snake_case = {}
__snake_case = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
__snake_case = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
__snake_case = """.""".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
__snake_case = new_key.replace(__A ,__A )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
__snake_case = new_key.replace(__A ,__A )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
__snake_case = re.sub(r"""layers_(\d+)""" ,r"""layer.\1""" ,__A )
__snake_case = new_key.replace("""encoder""" ,"""encoder.encoder""" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
__snake_case = re.sub(r"""layers_(\d+)""" ,r"""layer.\1""" ,__A )
__snake_case = flax_dict[key]
__snake_case = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
__snake_case = torch.from_numpy(converted_dict[key].T )
else:
__snake_case = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def lowerCamelCase__ ( __A :Union[str, Any] ,__A :Dict ,__A :List[Any]=False ,__A :str=False ):
"""simple docstring"""
__snake_case = get_flax_param(__A )
if not use_large:
__snake_case = PixaStructVisionConfig()
__snake_case = PixaStructTextConfig()
else:
__snake_case = PixaStructVisionConfig(
hidden_size=1_5_3_6 ,d_ff=3_9_6_8 ,num_attention_heads=2_4 ,num_hidden_layers=1_8 )
__snake_case = PixaStructTextConfig(hidden_size=1_5_3_6 ,d_ff=3_9_6_8 ,num_heads=2_4 ,num_layers=1_8 )
__snake_case = PixaStructConfig(
vision_config=encoder_config.to_dict() ,text_config=decoder_config.to_dict() ,is_vqa=__A )
__snake_case = PixaStructForConditionalGeneration(__A )
__snake_case = rename_and_convert_flax_params(__A )
model.load_state_dict(__A )
__snake_case = AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""" )
__snake_case = PixaStructImageProcessor()
__snake_case = PixaStructProcessor(image_processor=__A ,tokenizer=__A )
if use_large:
__snake_case = 4_0_9_6
__snake_case = True
# mkdir if needed
os.makedirs(__A ,exist_ok=__A )
model.save_pretrained(__A )
processor.save_pretrained(__A )
print("""Model saved in {}""".format(__A ) )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''')
UpperCamelCase__ = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 268 |
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
UpperCamelCase__ = get_logger(__name__)
class __snake_case ( enum.Enum ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = "all_checks"
__SCREAMING_SNAKE_CASE = "basic_checks"
__SCREAMING_SNAKE_CASE = "no_checks"
class __snake_case ( snake_case__ ):
"""simple docstring"""
class __snake_case ( snake_case__ ):
"""simple docstring"""
class __snake_case ( snake_case__ ):
"""simple docstring"""
class __snake_case ( snake_case__ ):
"""simple docstring"""
def lowerCamelCase__ ( __A :Optional[dict] ,__A :dict ,__A :Any=None ):
"""simple docstring"""
if expected_checksums is None:
logger.info("""Unable to verify checksums.""" )
return
if len(set(__A ) - set(__A ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(__A ) - set(__A ) ) )
if len(set(__A ) - set(__A ) ) > 0:
raise UnexpectedDownloadedFile(str(set(__A ) - set(__A ) ) )
__snake_case = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
__snake_case = """ for """ + verification_name if verification_name is not None else """"""
if len(__A ) > 0:
raise NonMatchingChecksumError(
F'Checksums didn\'t match{for_verification_name}:\n'
F'{bad_urls}\n'
"""Set `verification_mode='no_checks'` to skip checksums verification and ignore this error""" )
logger.info("""All the checksums matched successfully""" + for_verification_name )
class __snake_case ( snake_case__ ):
"""simple docstring"""
class __snake_case ( snake_case__ ):
"""simple docstring"""
class __snake_case ( snake_case__ ):
"""simple docstring"""
class __snake_case ( snake_case__ ):
"""simple docstring"""
def lowerCamelCase__ ( __A :Optional[dict] ,__A :dict ):
"""simple docstring"""
if expected_splits is None:
logger.info("""Unable to verify splits sizes.""" )
return
if len(set(__A ) - set(__A ) ) > 0:
raise ExpectedMoreSplits(str(set(__A ) - set(__A ) ) )
if len(set(__A ) - set(__A ) ) > 0:
raise UnexpectedSplits(str(set(__A ) - set(__A ) ) )
__snake_case = [
{"""expected""": expected_splits[name], """recorded""": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(__A ) > 0:
raise NonMatchingSplitsSizesError(str(__A ) )
logger.info("""All the splits matched successfully.""" )
def lowerCamelCase__ ( __A :str ,__A :bool = True ):
"""simple docstring"""
if record_checksum:
__snake_case = shaaaa()
with open(__A ,"""rb""" ) as f:
for chunk in iter(lambda: f.read(1 << 2_0 ) ,b"""""" ):
m.update(__A )
__snake_case = m.hexdigest()
else:
__snake_case = None
return {"num_bytes": os.path.getsize(__A ), "checksum": checksum}
def lowerCamelCase__ ( __A :Optional[int] ):
"""simple docstring"""
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 268 | 1 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def __a ( __UpperCAmelCase : Union[str, Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def __a ( __UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
lowerCamelCase_ : List[Any] = s_dict.pop(__UpperCAmelCase )
elif "subsample" in key:
lowerCamelCase_ : Dict = s_dict.pop(__UpperCAmelCase )
def __a ( __UpperCAmelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ : str = emb.weight.shape
lowerCamelCase_ : Any = nn.Linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase )
lowerCamelCase_ : Dict = emb.weight.data
return lin_layer
def __a ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : int = torch.load(__UpperCAmelCase , map_location="cpu" )
lowerCamelCase_ : str = mam_aaa["args"]
lowerCamelCase_ : List[str] = mam_aaa["model"]
lowerCamelCase_ : List[str] = state_dict["decoder.output_projection.weight"]
remove_ignore_keys_(__UpperCAmelCase )
rename_keys(__UpperCAmelCase )
lowerCamelCase_ : Tuple = state_dict["decoder.embed_tokens.weight"].shape[0]
lowerCamelCase_ : Optional[int] = args.share_decoder_input_output_embed
lowerCamelCase_ : Optional[int] = [int(__UpperCAmelCase ) for i in args.conv_kernel_sizes.split("," )]
lowerCamelCase_ : Tuple = SpeechaTextConfig(
vocab_size=__UpperCAmelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(__UpperCAmelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=__UpperCAmelCase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=__UpperCAmelCase , num_beams=5 , max_length=200 , use_cache=__UpperCAmelCase , decoder_start_token_id=2 , early_stopping=__UpperCAmelCase , )
lowerCamelCase_ : Optional[Any] = SpeechaTextForConditionalGeneration(__UpperCAmelCase )
lowerCamelCase_ , lowerCamelCase_ : Dict = model.model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0 and not set(__UpperCAmelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
f" but all the following weights are missing {missing}" )
if tie_embeds:
lowerCamelCase_ : Tuple = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
lowerCamelCase_ : List[Any] = lm_head_weights
model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
snake_case_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
snake_case_ : Union[str, Any] = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 253 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ : Optional[Any] = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Any = [
"OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OPTForCausalLM",
"OPTModel",
"OPTPreTrainedModel",
"OPTForSequenceClassification",
"OPTForQuestionAnswering",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[str] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : int = [
"FlaxOPTForCausalLM",
"FlaxOPTModel",
"FlaxOPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
snake_case_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 253 | 1 |
from __future__ import annotations
def UpperCAmelCase ( lowercase , lowercase = None , lowercase = None , lowercase = False , ):
"""simple docstring"""
__lowercase = cipher_alphabet or [chr(lowercase ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
__lowercase = {
'''a''': 0.08497,
'''b''': 0.01492,
'''c''': 0.02202,
'''d''': 0.04253,
'''e''': 0.11162,
'''f''': 0.02228,
'''g''': 0.02015,
'''h''': 0.06094,
'''i''': 0.07546,
'''j''': 0.00153,
'''k''': 0.01292,
'''l''': 0.04025,
'''m''': 0.02406,
'''n''': 0.06749,
'''o''': 0.07507,
'''p''': 0.01929,
'''q''': 0.00095,
'''r''': 0.07587,
'''s''': 0.06327,
'''t''': 0.09356,
'''u''': 0.02758,
'''v''': 0.00978,
'''w''': 0.02560,
'''x''': 0.00150,
'''y''': 0.01994,
'''z''': 0.00077,
}
else:
# Custom frequencies dictionary
__lowercase = frequencies_dict
if not case_sensitive:
__lowercase = ciphertext.lower()
# Chi squared statistic values
__lowercase = {}
# cycle through all of the shifts
for shift in range(len(lowercase ) ):
__lowercase = ''''''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
__lowercase = (alphabet_letters.index(letter.lower() ) - shift) % len(
lowercase )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
__lowercase = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
__lowercase = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
__lowercase = decrypted_with_shift.lower().count(lowercase )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
__lowercase = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
__lowercase = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
__lowercase = decrypted_with_shift.count(lowercase )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
__lowercase = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
__lowercase = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
__lowercase = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(lowercase ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
__lowercase = min(
lowercase , key=lowercase , )
# Get all the data from the most likely cipher (key, decoded message)
(
(
__lowercase
) , (
__lowercase
) ,
) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
) | 534 | def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
__lowercase = len(lowercase )
__lowercase = len(lowercase )
__lowercase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
__lowercase = True
for i in range(lowercase ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
__lowercase = True
if a[i].islower():
__lowercase = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod() | 534 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
_snake_case = TypeVar('''T''')
_snake_case = TypeVar('''U''')
class UpperCAmelCase_ ( Generic[T, U] ):
'''simple docstring'''
def __init__( self , __A , __A ):
"""simple docstring"""
lowerCamelCase : int = key
lowerCamelCase : Tuple = val
lowerCamelCase : DoubleLinkedListNode[T, U] | None = None
lowerCamelCase : DoubleLinkedListNode[T, U] | None = None
def __repr__( self ):
"""simple docstring"""
return (
F"""Node: key: {self.key}, val: {self.val}, """
F"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class UpperCAmelCase_ ( Generic[T, U] ):
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
lowerCamelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(__A , __A )
lowerCamelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(__A , __A )
lowerCamelCase , lowerCamelCase : str = self.rear, self.head
def __repr__( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = ["DoubleLinkedList"]
lowerCamelCase : Dict = self.head
while node.next is not None:
rep.append(str(__A ) )
lowerCamelCase : Union[str, Any] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(__A )
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Dict = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
lowerCamelCase : Any = node
lowerCamelCase : Optional[int] = previous
lowerCamelCase : Optional[int] = node
lowerCamelCase : int = self.rear
def _snake_case ( self , __A ):
"""simple docstring"""
if node.prev is None or node.next is None:
return None
lowerCamelCase : Any = node.next
lowerCamelCase : Any = node.prev
lowerCamelCase : Union[str, Any] = None
lowerCamelCase : List[Any] = None
return node
class UpperCAmelCase_ ( Generic[T, U] ):
'''simple docstring'''
__A : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self , __A ):
"""simple docstring"""
lowerCamelCase : DoubleLinkedList[T, U] = DoubleLinkedList()
lowerCamelCase : Any = capacity
lowerCamelCase : Optional[Any] = 0
lowerCamelCase : Union[str, Any] = 0
lowerCamelCase : int = 0
lowerCamelCase : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self ):
"""simple docstring"""
return (
F"""CacheInfo(hits={self.hits}, misses={self.miss}, """
F"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self , __A ):
"""simple docstring"""
return key in self.cache
def _snake_case ( self , __A ):
"""simple docstring"""
if key in self.cache:
self.hits += 1
lowerCamelCase : DoubleLinkedListNode[T, U] = self.cache[key]
lowerCamelCase : Dict = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(__A )
return node.val
self.miss += 1
return None
def _snake_case ( self , __A , __A ):
"""simple docstring"""
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
lowerCamelCase : Dict = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(__A ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
lowerCamelCase : Any = DoubleLinkedListNode(__A , __A )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
lowerCamelCase : int = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
lowerCamelCase : str = value
self.list.add(__A )
@classmethod
def _snake_case ( cls , __A = 128 ):
"""simple docstring"""
def cache_decorator_inner(__A ) -> Callable[..., U]:
def cache_decorator_wrapper(*__A ) -> U:
if func not in cls.decorator_function_to_instance_map:
lowerCamelCase : Dict = LRUCache(__A )
lowerCamelCase : int = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
lowerCamelCase : Optional[int] = func(*__A )
cls.decorator_function_to_instance_map[func].put(args[0] , __A )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(__A , "cache_info" , __A ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 231 |
from __future__ import annotations
import numpy as np
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase , lowerCamelCase : Dict = np.shape(SCREAMING_SNAKE_CASE_ )
if rows != columns:
lowerCamelCase : int = (
"'table' has to be of square shaped array but got a "
f"""{rows}x{columns} array:\n{table}"""
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Dict = np.zeros((rows, columns) )
lowerCamelCase : List[str] = np.zeros((rows, columns) )
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : Dict = sum(lower[i][k] * upper[k][j] for k in range(SCREAMING_SNAKE_CASE_ ) )
if upper[j][j] == 0:
raise ArithmeticError("No LU decomposition exists" )
lowerCamelCase : Dict = (table[i][j] - total) / upper[j][j]
lowerCamelCase : Dict = 1
for j in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : int = sum(lower[i][k] * upper[k][j] for k in range(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase : Any = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 231 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Dict , _snake_case : Union[str, Any] , _snake_case : Optional[Any]=12 , _snake_case : Any=7 , _snake_case : List[str]=True , _snake_case : int=True , _snake_case : int=True , _snake_case : Tuple=99 , _snake_case : List[Any]=32 , _snake_case : Optional[int]=32 , _snake_case : List[str]=2 , _snake_case : List[str]=4 , _snake_case : List[Any]=37 , _snake_case : Union[str, Any]=0.1 , _snake_case : Tuple=0.1 , _snake_case : Dict=5_12 , _snake_case : Union[str, Any]=0.02 , _snake_case : Any=0 , _snake_case : Optional[Any]=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = projection_dim
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = dropout
A__ = attention_dropout
A__ = max_position_embeddings
A__ = initializer_range
A__ = scope
A__ = bos_token_id
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
A__ = input_mask.numpy()
A__ , A__ = input_mask.shape
A__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_snake_case ):
A__ = 1
A__ = 0
A__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(_snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _a ( self : int , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : List[str] ):
"""simple docstring"""
A__ = TFBlipTextModel(config=_snake_case )
A__ = model(_snake_case , attention_mask=_snake_case , training=_snake_case )
A__ = model(_snake_case , training=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _a ( self : str ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Tuple = (TFBlipTextModel,) if is_tf_available() else ()
A__ : Optional[int] = False
A__ : Union[str, Any] = False
A__ : Union[str, Any] = False
def _a ( self : Any ):
"""simple docstring"""
A__ = BlipTextModelTester(self )
A__ = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def _a ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
pass
def _a ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _a ( self : Any ):
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _a ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _a ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFBlipTextModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def _a ( self : int , _snake_case : int=True ):
"""simple docstring"""
super().test_pt_tf_model_equivalence(allow_missing_keys=_snake_case )
| 9 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
__magic_name__: Optional[int] = logging.get_logger(__name__)
__magic_name__: List[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__magic_name__: Optional[Any] = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
__magic_name__: List[Any] = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
__magic_name__: Union[str, Any] = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Optional[int] = VOCAB_FILES_NAMES
lowercase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Tuple = PRETRAINED_INIT_CONFIGURATION
lowercase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Dict = SqueezeBertTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__="[UNK]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="[PAD]" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[MASK]" , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> List[str]:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
__magic_name__ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCAmelCase__ ) != tokenize_chinese_chars
):
__magic_name__ : Any = getattr(lowerCAmelCase__ , normalizer_state.pop("""type""" ) )
__magic_name__ : Any = do_lower_case
__magic_name__ : List[str] = strip_accents
__magic_name__ : int = tokenize_chinese_chars
__magic_name__ : int = normalizer_class(**lowerCAmelCase__ )
__magic_name__ : Optional[int] = do_lower_case
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[Any]:
__magic_name__ : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
__magic_name__ : int = [self.sep_token_id]
__magic_name__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
__magic_name__ : Optional[Any] = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
| 324 | 0 |
"""simple docstring"""
from datetime import datetime
import requests
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
_UpperCAmelCase = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""]
return requests.get(lowercase ).content
if __name__ == "__main__":
UpperCAmelCase__ = input("""Enter Video/IGTV url: """).strip()
UpperCAmelCase__ = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''')
| 275 | """simple docstring"""
from __future__ import annotations
from random import choice
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return choice(lowercase )
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = random_pivot(lowercase )
# partition based on pivot
# linear time
_UpperCAmelCase = [e for e in lst if e < pivot]
_UpperCAmelCase = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(lowercase ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(lowercase ) < k - 1:
return kth_number(lowercase ,k - len(lowercase ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(lowercase ,lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 275 | 1 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> List[Any]:
_UpperCAmelCase , _UpperCAmelCase = image.size
_UpperCAmelCase , _UpperCAmelCase = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32
_UpperCAmelCase = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] )
_UpperCAmelCase = np.array(__snake_case ).astype(np.floataa ) / 255.0
_UpperCAmelCase = image[None].transpose(0 , 3 , 1 , 2 )
_UpperCAmelCase = torch.from_numpy(__snake_case )
return 2.0 * image - 1.0
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCamelCase : VQModel , lowerCamelCase : UNetaDModel , lowerCamelCase : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ) -> Any:
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase )
@torch.no_grad()
def __call__( self : Tuple , lowerCamelCase : Union[torch.Tensor, PIL.Image.Image] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : Optional[int] = 100 , lowerCamelCase : Optional[float] = 0.0 , lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
if isinstance(lowerCamelCase , PIL.Image.Image ):
_UpperCAmelCase = 1
elif isinstance(lowerCamelCase , torch.Tensor ):
_UpperCAmelCase = image.shape[0]
else:
raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCamelCase )}""" )
if isinstance(lowerCamelCase , PIL.Image.Image ):
_UpperCAmelCase = preprocess(lowerCamelCase )
_UpperCAmelCase , _UpperCAmelCase = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_UpperCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width)
_UpperCAmelCase = next(self.unet.parameters() ).dtype
_UpperCAmelCase = randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
_UpperCAmelCase = image.to(device=self.device , dtype=lowerCamelCase )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(lowerCamelCase , device=self.device )
_UpperCAmelCase = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_UpperCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCAmelCase = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCAmelCase = {}
if accepts_eta:
_UpperCAmelCase = eta
for t in self.progress_bar(lowerCamelCase ):
# concat latents and low resolution image in the channel dimension.
_UpperCAmelCase = torch.cat([latents, image] , dim=1 )
_UpperCAmelCase = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
# predict the noise residual
_UpperCAmelCase = self.unet(lowerCamelCase , lowerCamelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
# decode the image latents with the VQVAE
_UpperCAmelCase = self.vqvae.decode(lowerCamelCase ).sample
_UpperCAmelCase = torch.clamp(lowerCamelCase , -1.0 , 1.0 )
_UpperCAmelCase = image / 2 + 0.5
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase ) | 108 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
UpperCamelCase : List[str] = logging.get_logger(__name__)
UpperCamelCase : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase : int = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
UpperCamelCase : Tuple = {
'squeezebert/squeezebert-uncased': 5_12,
'squeezebert/squeezebert-mnli': 5_12,
'squeezebert/squeezebert-mnli-headless': 5_12,
}
UpperCamelCase : Dict = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = SqueezeBertTokenizer
def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,):
super().__init__(
_lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,)
lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars
):
lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = strip_accents
lowerCamelCase__ = tokenize_chinese_chars
lowerCamelCase__ = normalizer_class(**_lowerCAmelCase )
lowerCamelCase__ = do_lower_case
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ):
lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
| 50 | 0 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCAmelCase_ = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class UpperCAmelCase_ :
"""simple docstring"""
__SCREAMING_SNAKE_CASE = PegasusConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = '''gelu'''
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=20 , lowerCamelCase=2 , lowerCamelCase=1 , lowerCamelCase=0 , ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase : int = parent
UpperCamelCase : str = batch_size
UpperCamelCase : Dict = seq_length
UpperCamelCase : Optional[Any] = is_training
UpperCamelCase : List[Any] = use_labels
UpperCamelCase : Tuple = vocab_size
UpperCamelCase : Any = hidden_size
UpperCamelCase : Any = num_hidden_layers
UpperCamelCase : List[Any] = num_attention_heads
UpperCamelCase : Dict = intermediate_size
UpperCamelCase : List[Any] = hidden_dropout_prob
UpperCamelCase : Dict = attention_probs_dropout_prob
UpperCamelCase : int = max_position_embeddings
UpperCamelCase : str = eos_token_id
UpperCamelCase : Any = pad_token_id
UpperCamelCase : List[Any] = bos_token_id
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
UpperCamelCase : Dict = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
UpperCamelCase : Any = np.concatenate([input_ids, eos_tensor] , axis=1 )
UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase : str = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCamelCase : Union[str, Any] = prepare_pegasus_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase : Tuple = 20
UpperCamelCase : List[str] = model_class_name(lowerCamelCase )
UpperCamelCase : List[Any] = model.encode(inputs_dict["input_ids"] )
UpperCamelCase , UpperCamelCase : str = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
UpperCamelCase : List[Any] = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase , lowerCamelCase )
UpperCamelCase : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
UpperCamelCase : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCamelCase : List[Any] = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , decoder_position_ids=lowerCamelCase , )
UpperCamelCase : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
UpperCamelCase : List[Any] = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase , )
UpperCamelCase : str = model.decode(lowerCamelCase , lowerCamelCase )
UpperCamelCase : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Dict:
'''simple docstring'''
UpperCamelCase : int = 20
UpperCamelCase : List[Any] = model_class_name(lowerCamelCase )
UpperCamelCase : List[str] = model.encode(inputs_dict["input_ids"] )
UpperCamelCase , UpperCamelCase : Optional[int] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
UpperCamelCase : List[str] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCamelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase , lowerCamelCase )
UpperCamelCase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCamelCase : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase , decoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , decoder_position_ids=lowerCamelCase , )
UpperCamelCase : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
UpperCamelCase : str = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase , decoder_position_ids=lowerCamelCase , )
UpperCamelCase : List[str] = model.decode(lowerCamelCase , lowerCamelCase , decoder_attention_mask=lowerCamelCase )
UpperCamelCase : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def A__ ( A : int , A : Union[str, Any] , A : Dict , A : List[str]=None , A : Optional[Any]=None , ):
'''simple docstring'''
if attention_mask is None:
UpperCamelCase : List[str] = np.not_equal(A , config.pad_token_id).astype(np.inta)
if decoder_attention_mask is None:
UpperCamelCase : Optional[Any] = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id).astype(np.inta),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class UpperCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__SCREAMING_SNAKE_CASE = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCamelCase : Optional[int] = FlaxPegasusModelTester(self )
UpperCamelCase : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
UpperCamelCase , UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
UpperCamelCase , UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase : Optional[Any] = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
UpperCamelCase : List[str] = model_class(lowerCamelCase )
@jax.jit
def encode_jitted(lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ):
return model.encode(input_ids=lowerCamelCase , attention_mask=lowerCamelCase )
with self.subTest("JIT Enabled" ):
UpperCamelCase : Tuple = encode_jitted(**lowerCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCamelCase : int = encode_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase : Dict = model_class(lowerCamelCase )
UpperCamelCase : List[Any] = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
UpperCamelCase : Any = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
return model.decode(
decoder_input_ids=lowerCamelCase , decoder_attention_mask=lowerCamelCase , encoder_outputs=lowerCamelCase , )
with self.subTest("JIT Enabled" ):
UpperCamelCase : List[Any] = decode_jitted(**lowerCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCamelCase : Any = decode_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase , lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCamelCase : Tuple = model_class_name.from_pretrained("google/pegasus-large" , from_pt=lowerCamelCase )
UpperCamelCase : Optional[Any] = np.ones((1, 1) )
UpperCamelCase : Union[str, Any] = model(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase : Dict = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
UpperCamelCase : List[Any] = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
UpperCamelCase : int = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
UpperCamelCase : Union[str, Any] = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
UpperCamelCase : Optional[int] = tokenizer(lowerCamelCase , return_tensors="np" , truncation=lowerCamelCase , max_length=5_12 , padding=lowerCamelCase )
UpperCamelCase : List[Any] = model.generate(**lowerCamelCase , num_beams=2 ).sequences
UpperCamelCase : Tuple = tokenizer.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
assert tgt_text == decoded
| 435 |
'''simple docstring'''
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase , scheduler=lowerCamelCase )
@torch.no_grad()
def __call__( self , lowerCamelCase = 1 , lowerCamelCase = None , lowerCamelCase = 50 , lowerCamelCase = "pil" , lowerCamelCase = True , **lowerCamelCase , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
UpperCamelCase : Dict = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=lowerCamelCase , )
UpperCamelCase : Optional[int] = image.to(self.device )
# set step values
self.scheduler.set_timesteps(lowerCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCamelCase : Dict = self.unet(lowerCamelCase , lowerCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCamelCase : Any = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
UpperCamelCase : str = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase : List[Any] = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=lowerCamelCase ), "This is a local test"
| 435 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class __lowercase :
'''simple docstring'''
__lowerCAmelCase = 42
__lowerCAmelCase = None
__lowerCAmelCase = None
def __A ( ) -> Node | None:
__a : List[str] = Node(1)
__a : Any = Node(2)
__a : Tuple = Node(3)
__a : str = Node(4)
__a : Dict = Node(5)
return tree
def __A ( a_ :Node | None) -> list[int]:
return [root.data, *preorder(root.left), *preorder(root.right)] if root else []
def __A ( a_ :Node | None) -> list[int]:
return postorder(root.left) + postorder(root.right) + [root.data] if root else []
def __A ( a_ :Node | None) -> list[int]:
return [*inorder(root.left), root.data, *inorder(root.right)] if root else []
def __A ( a_ :Node | None) -> int:
return (max(height(root.left) , height(root.right)) + 1) if root else 0
def __A ( a_ :Node | None) -> Sequence[Node | None]:
__a : list[Any] = []
if root is None:
return output
__a : List[Any] = deque([root])
while process_queue:
__a : str = process_queue.popleft()
output.append(node.data)
if node.left:
process_queue.append(node.left)
if node.right:
process_queue.append(node.right)
return output
def __A ( a_ :Node | None , a_ :int) -> Sequence[Node | None]:
__a : list[Any] = []
def populate_output(a_ :Node | None , a_ :int) -> None:
if not root:
return
if level == 1:
output.append(root.data)
elif level > 1:
populate_output(root.left , level - 1)
populate_output(root.right , level - 1)
populate_output(a_ , a_)
return output
def __A ( a_ :Node | None , a_ :int) -> Sequence[Node | None]:
__a : list[Any] = []
def populate_output(a_ :Node | None , a_ :int) -> None:
if root is None:
return
if level == 1:
output.append(root.data)
elif level > 1:
populate_output(root.right , level - 1)
populate_output(root.left , level - 1)
populate_output(a_ , a_)
return output
def __A ( a_ :Node | None) -> Sequence[Node | None] | list[Any]:
if root is None:
return []
__a : list[Sequence[Node | None]] = []
__a : int = 0
__a : Optional[int] = height(a_)
for h in range(1 , height_tree + 1):
if not flag:
output.append(get_nodes_from_left_to_right(a_ , a_))
__a : Any = 1
else:
output.append(get_nodes_from_right_to_left(a_ , a_))
__a : Optional[int] = 0
return output
def __A ( ) -> None: # Main function for testing.
__a : str = make_tree()
print(F"""In-order Traversal: {inorder(a_)}""")
print(F"""Pre-order Traversal: {preorder(a_)}""")
print(F"""Post-order Traversal: {postorder(a_)}""" , '''\n''')
print(F"""Height of Tree: {height(a_)}""" , '''\n''')
print('''Complete Level Order Traversal: ''')
print(level_order(a_) , '''\n''')
print('''Level-wise order Traversal: ''')
for level in range(1 , height(a_) + 1):
print(F"""Level {level}:""" , get_nodes_from_left_to_right(a_ , level=a_))
print('''\nZigZag order Traversal: ''')
print(zigzag(a_))
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 52 |
import argparse
import json
import subprocess
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = []
__magic_name__ : Optional[int] = (
f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
__magic_name__ : Any = subprocess.run(_A, shell=_A, stdout=subprocess.PIPE )
__magic_name__ : int = output.stdout.decode("""utf-8""" )
__magic_name__ : Optional[int] = json.loads(_A )
__magic_name__ : Tuple = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_A )
# save the result so we can report them on Slack
with open("""offline_runners.txt""", """w""" ) as fp:
fp.write(json.dumps(_A ) )
if len(_A ) > 0:
__magic_name__ : Optional[Any] = """\n""".join([x["""name"""] for x in offline_runners] )
raise ValueError(f'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def UpperCamelCase ( _A ):
"""simple docstring"""
return values.split(""",""" )
__magic_name__: List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
__magic_name__: Dict = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 324 | 0 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__lowercase = '''bart'''
__lowercase = True
@st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE )
def lowerCamelCase ( ):
'''simple docstring'''
if LOAD_DENSE_INDEX:
__UpperCamelCase :Union[str, Any] = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
__UpperCamelCase :Union[str, Any] = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
__UpperCamelCase :Optional[int] = qar_model.eval()
else:
__UpperCamelCase , __UpperCamelCase :Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
__UpperCamelCase :str = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
__UpperCamelCase :List[Any] = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
__UpperCamelCase :Any = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
__UpperCamelCase :List[str] = sas_model.eval()
else:
__UpperCamelCase , __UpperCamelCase :Tuple = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE )
def lowerCamelCase ( ):
'''simple docstring'''
if LOAD_DENSE_INDEX:
__UpperCamelCase :int = faiss.StandardGpuResources()
__UpperCamelCase :List[Any] = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
__UpperCamelCase :List[str] = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
__UpperCamelCase :Tuple = faiss.IndexFlatIP(128 )
__UpperCamelCase :str = faiss.index_cpu_to_gpu(SCREAMING_SNAKE_CASE , 1 , SCREAMING_SNAKE_CASE )
wikiaab_gpu_index_flat.add(SCREAMING_SNAKE_CASE ) # TODO fix for larger GPU
else:
__UpperCamelCase , __UpperCamelCase :Any = (None, None)
__UpperCamelCase :Optional[Any] = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE )
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :Optional[Any] = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
__UpperCamelCase :int = elia['''train_eli5''']
__UpperCamelCase :List[str] = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
__UpperCamelCase :Optional[Any] = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(SCREAMING_SNAKE_CASE )
return (elia_train, eli5_train_q_index)
__lowercase , __lowercase , __lowercase = load_indexes()
__lowercase , __lowercase , __lowercase , __lowercase = load_models()
__lowercase , __lowercase = load_train_data()
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=10 ):
'''simple docstring'''
__UpperCamelCase :str = embed_questions_for_retrieval([question] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCamelCase , __UpperCamelCase :Union[str, Any] = eli5_train_q_index.search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCamelCase :Optional[Any] = [elia_train[int(SCREAMING_SNAKE_CASE )] for i in I[0]]
return nn_examples
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="wiki40b" , SCREAMING_SNAKE_CASE="dense" , SCREAMING_SNAKE_CASE=10 ):
'''simple docstring'''
if source == "none":
__UpperCamelCase , __UpperCamelCase :Optional[int] = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__UpperCamelCase , __UpperCamelCase :List[str] = query_qa_dense_index(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
__UpperCamelCase , __UpperCamelCase :Optional[int] = query_es_index(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index_name='''english_wiki40b_snippets_100w''' , n_results=SCREAMING_SNAKE_CASE , )
__UpperCamelCase :int = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
__UpperCamelCase :int = '''question: {} context: {}'''.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda SCREAMING_SNAKE_CASE : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda SCREAMING_SNAKE_CASE : None),
} )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.95 , SCREAMING_SNAKE_CASE=0.8 ):
'''simple docstring'''
with torch.no_grad():
__UpperCamelCase :Union[str, Any] = qa_sas_generate(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_answers=1 , num_beams=SCREAMING_SNAKE_CASE , min_len=SCREAMING_SNAKE_CASE , max_len=SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , temp=SCREAMING_SNAKE_CASE , top_p=SCREAMING_SNAKE_CASE , top_k=SCREAMING_SNAKE_CASE , max_input_length=1_024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
__lowercase = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
__lowercase = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__lowercase = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
__lowercase = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
__lowercase = st.sidebar.checkbox('''Demo options''')
if demo_options:
__lowercase = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
__lowercase = action_list.index(action_st)
__lowercase = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
__lowercase = show_type == '''Show full text of passages'''
else:
__lowercase = 3
__lowercase = True
__lowercase = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
__lowercase = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
__lowercase = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
__lowercase = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
__lowercase = '''wiki40b'''
__lowercase = '''dense'''
__lowercase = '''beam'''
__lowercase = 2
__lowercase = 64
__lowercase = 256
__lowercase = None
__lowercase = None
__lowercase = st.sidebar.checkbox('''Generation options''')
if generate_options:
__lowercase = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
__lowercase = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
__lowercase = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__lowercase = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__lowercase = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__lowercase = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
__lowercase = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
__lowercase = None
# start main text
__lowercase = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
__lowercase = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__lowercase = st.text_input('''Enter your question here:''', '''''')
else:
__lowercase = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
__lowercase , __lowercase = make_support(question, source=wiki_source, method='''dense''', n_results=10)
__lowercase , __lowercase = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
__lowercase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__lowercase = support_list[:10]
__lowercase = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
__lowercase , __lowercase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__lowercase , __lowercase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
__lowercase = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
__lowercase = res[1].strip()
if sec_titles == "":
__lowercase = '''[{}]({})'''.format(res[0], wiki_url)
else:
__lowercase = sec_titles.split(''' & ''')
__lowercase = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
__lowercase = find_nearest_training(question)
__lowercase = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
__lowercase = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
__lowercase = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 452 | import requests
from bsa import BeautifulSoup
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[Any] = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE , params=SCREAMING_SNAKE_CASE ).content , '''html.parser''' )
__UpperCamelCase :int = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
__UpperCamelCase :Union[str, Any] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
if __name__ == "__main__":
__lowercase = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 2018,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
| 452 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , _snake_case : List[str]=2 , _snake_case : Optional[int]=3 , _snake_case : Union[str, Any]=64 , _snake_case : Optional[Any]=None ) -> Any:
SCREAMING_SNAKE_CASE__ = np.random.default_rng(snake_case_ )
SCREAMING_SNAKE_CASE__ = length
SCREAMING_SNAKE_CASE__ = rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : List[Any] ) -> Union[str, Any]:
return self.length
def __getitem__( self : List[str] , _snake_case : int ) -> Dict:
return {"x": self.x[i], "y": self.y[i]}
class lowerCamelCase (torch.nn.Module ):
'''simple docstring'''
def __init__( self : int , _snake_case : str=0 , _snake_case : Optional[Any]=0 , _snake_case : Tuple=False ) -> Any:
super().__init__()
SCREAMING_SNAKE_CASE__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE__ = True
def lowerCAmelCase_ ( self : int , _snake_case : str=None ) -> Tuple:
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
SCREAMING_SNAKE_CASE__ = False
return x * self.a[0] + self.b[0]
class lowerCamelCase (torch.nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , _snake_case : Tuple=0 , _snake_case : int=0 , _snake_case : int=False ) -> Optional[int]:
super().__init__()
SCREAMING_SNAKE_CASE__ = torch.nn.Parameter(torch.tensor(snake_case_ ).float() )
SCREAMING_SNAKE_CASE__ = torch.nn.Parameter(torch.tensor(snake_case_ ).float() )
SCREAMING_SNAKE_CASE__ = True
def lowerCAmelCase_ ( self : Union[str, Any] , _snake_case : List[str]=None ) -> int:
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
SCREAMING_SNAKE_CASE__ = False
return x * self.a + self.b
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase = 16 ) -> List[Any]:
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("bert-base-cased" )
SCREAMING_SNAKE_CASE__ = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
SCREAMING_SNAKE_CASE__ = load_dataset("csv" , data_files=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = datasets['''train'''].unique("label" )
SCREAMING_SNAKE_CASE__ = {v: i for i, v in enumerate(__lowerCAmelCase )}
def tokenize_function(__UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ = tokenizer(
examples["sentence1"] , examples["sentence2"] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length" )
if "label" in examples:
SCREAMING_SNAKE_CASE__ = [label_to_id[l] for l in examples['''label''']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE__ = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["sentence1", "sentence2", "label"] , )
def collate_fn(__UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(__lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ = DataLoader(tokenized_datasets["train"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=2 )
SCREAMING_SNAKE_CASE__ = DataLoader(tokenized_datasets["validation"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader
| 159 |
'''simple docstring'''
import math
def _a ( __lowerCAmelCase : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _a ( __lowerCAmelCase : int = 1_00_01 ):
"""simple docstring"""
try:
snake_case__ : List[Any] = int(__lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
if nth <= 0:
raise ValueError('''Parameter nth must be greater than or equal to one.''' )
snake_case__ : list[int] = []
snake_case__ : Dict = 2
while len(__lowerCAmelCase ) < nth:
if is_prime(__lowerCAmelCase ):
primes.append(__lowerCAmelCase )
num += 1
else:
num += 1
return primes[len(__lowerCAmelCase ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 347 | 0 |
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def _UpperCAmelCase (UpperCamelCase_ : Union[str, Any]=None ):
'''simple docstring'''
if subparsers is not None:
_lowerCAmelCase : Optional[Any] = subparsers.add_parser("""test""" )
else:
_lowerCAmelCase : List[Any] = argparse.ArgumentParser("""Accelerate test command""" )
parser.add_argument(
"""--config_file""" , default=a_ , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have """
"""such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed """
"""with \'huggingface\'."""
) , )
if subparsers is not None:
parser.set_defaults(func=a_ )
return parser
def _UpperCAmelCase (UpperCamelCase_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase : List[str] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] )
if args.config_file is None:
_lowerCAmelCase : List[Any] = script_name
else:
_lowerCAmelCase : Dict = F"--config_file={args.config_file} {script_name}"
_lowerCAmelCase : Optional[Any] = ['''accelerate-launch'''] + test_args.split()
_lowerCAmelCase : List[Any] = execute_subprocess_async(a_ , env=os.environ.copy() )
if result.returncode == 0:
print("""Test is a success! You are ready for your distributed training!""" )
def _UpperCAmelCase ():
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = test_command_parser()
_lowerCAmelCase : Dict = parser.parse_args()
test_command(a_ )
if __name__ == "__main__":
main()
| 718 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
_lowerCamelCase : Dict = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
_lowerCamelCase : Union[str, Any] = dataset.iloc[:, 1:2].values
_lowerCamelCase : Any = dataset.iloc[:, 2].values
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[int] = train_test_split(X, y, test_size=0.2, random_state=0)
_lowerCamelCase : Optional[Any] = PolynomialFeatures(degree=4)
_lowerCamelCase : Optional[Any] = poly_reg.fit_transform(X)
_lowerCamelCase : Dict = LinearRegression()
pol_reg.fit(X_poly, y)
def _UpperCAmelCase ():
'''simple docstring'''
plt.scatter(UpperCamelCase_ , UpperCamelCase_ , color="""red""" )
plt.plot(UpperCamelCase_ , pol_reg.predict(poly_reg.fit_transform(UpperCamelCase_ ) ) , color="""blue""" )
plt.title("""Truth or Bluff (Linear Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 196 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.