"""A pure-Python implementation of the SHA-1 hash, for demonstration purposes."""
import argparse
import hashlib  # hashlib is only used for cross-checking in the test function
import struct
class SHA1Hash:
    """Contains the full pipeline for SHA-1 hashing a bytestring."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n: int, b: int) -> int:
        # Left-rotate the 32-bit integer n by b bits.
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self) -> bytes:
        # Append a 1 bit, zero padding, and the 64-bit big-endian message length.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self) -> list:
        # Split the padded data into 64-byte (512-bit) blocks.
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block: bytes) -> list:
        # Expand a 64-byte block into eighty 32-bit words.
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self) -> str:
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash() -> None:
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main() -> None:
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
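# --- Illustrative usage (not part of the original file) ---
# A minimal cross-check against hashlib using the well-known SHA-1 test
# vector for b"abc". `SHA1Hash` is the class defined above; nothing else
# is assumed.
if __name__ == "__main__":
    demo_digest = SHA1Hash(b"abc").final_hash()
    assert demo_digest == hashlib.sha1(b"abc").hexdigest()  # noqa: S324
    print(demo_digest)  # a9993e364706816aba3e25717850c26c9cd0d89d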
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of symbol pairs in a word; a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
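# Quick illustration (added for clarity, not in the original module): for the
# BPE word ("l", "o", "w</w>") -- a tuple of symbols with the end-of-word
# marker -- get_pairs returns the adjacent pairs that BPE merging operates on:
#
#     >>> sorted(get_pairs(("l", "o", "w</w>")))
#     [('l', 'o'), ('o', 'w</w>')]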
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair Encoding)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split a string into tokens using BPE."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
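# --- Illustrative usage (a sketch, not part of the original file) ---
# Assumes the published checkpoint from the URL map above plus network access;
# the pieces/ids shown by the prints are indicative only.
#
#     from transformers import BlenderbotSmallTokenizer
#     tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#     pieces = tok.tokenize("Sample input")       # lowercased, BPE-split pieces
#     ids = tok.convert_tokens_to_ids(pieces)     # looked up in vocab.json
#     assert tok.convert_tokens_to_string(pieces) == "sample input"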
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
"""AutoFeatureExtractor class."""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
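# --- Illustrative usage (a sketch, not part of the original file) ---
# The resolution order implemented above: explicit `feature_extractor_type` in
# preprocessor_config.json -> `auto_map` entry (remote code) -> the model
# config's model_type looked up in FEATURE_EXTRACTOR_MAPPING. Requires network
# access; the checkpoint name is only an example.
#
#     from transformers import AutoFeatureExtractor
#     fe = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#     print(type(fe).__name__)  # Wav2Vec2FeatureExtractor, per the mapping above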
"""simple docstring"""
def __UpperCAmelCase ( lowercase = 60_08_51_47_51_43 ):
"""simple docstring"""
try:
_UpperCAmelCase = int(snake_case__ )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
_UpperCAmelCase = 2
_UpperCAmelCase = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
_UpperCAmelCase = i
while n % i == 0:
_UpperCAmelCase = n // i
i += 1
return int(snake_case__ )
if __name__ == "__main__":
print(F'''{solution() = }''')
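# Worked example (added for clarity): the prime factors of 13195 are
# 5, 7, 13 and 29, so the loop above finishes with ans == 29.
#
#     >>> solution(13195)
#     29
#     >>> solution(600851475143)
#     6857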
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return log10(n / df), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """Combine term frequency and inverse document frequency."""
    return round(tf * idf, 3)
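if __name__ == "__main__":
    # Worked example (illustrative, not in the original file): a corpus of
    # three one-line documents, two of which contain "cat".
    corpus = "the cat sat\nthe dog ran\na cat slept"
    df, n = document_frequency("cat", corpus)  # df == 2, n == 3
    idf = inverse_document_frequency(df, n)    # round(log10(3 / 2), 3) == 0.176
    tf = term_frequency("cat", "the cat sat")  # 1
    print(tf_idf(tf, idf))                     # 0.176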
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
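# --- Illustrative usage (a sketch, not part of the original file) ---
# Builds a default config and inspects the ONNX input axes. Passing the config
# straight to CamembertOnnxConfig assumes OnnxConfig's usual `(config, task)`
# constructor.
#
#     config = CamembertConfig()
#     print(config.hidden_size)                 # 768
#     onnx_config = CamembertOnnxConfig(config)
#     print(list(onnx_config.inputs))           # ['input_ids', 'attention_mask']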
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string with Ascii85."""
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85-encoded bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
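if __name__ == "__main__":
    # Round-trip example (illustrative, not in the original file): Ascii85
    # encodes every 4 input bytes as 5 printable characters, so
    # decode(encode(s)) recovers s exactly.
    sample = "some text to encode"
    assert base85_decode(base85_encode(sample)) == sample
    print(base85_encode(sample))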
def binary_insertion_sort(collection: list) -> list:
    """Sort a mutable collection in ascending order with binary insertion sort."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # binary search for the insertion point of val in collection[:i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift elements right to make room, then insert
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
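# Note (added for clarity): the binary search above only cuts down the number
# of comparisons; elements still shift one-by-one, so the worst case remains
# O(n^2), the same as plain insertion sort. Quick check:
#
#     >>> binary_insertion_sort([5, 2, 9, 1])
#     [1, 2, 5, 9]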
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
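# --- Pattern exercised by the tests above (illustrative sketch) ---
# `set_scheduler` switches the pipeline to a named k-diffusion sampler before
# inference. A minimal sketch, assuming a CUDA device and downloaded weights:
#
#     pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
#     pipe.set_scheduler("sample_dpmpp_2m")  # sampler names come from k-diffusion
#     image = pipe("A painting of a squirrel eating a burger", num_inference_steps=20).images[0]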
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase (_UpperCamelCase ,unittest.TestCase ):
lowerCamelCase__ : str = VideoToVideoSDPipeline
lowerCamelCase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'} ) - {'image', 'width', 'height'}
lowerCamelCase__ : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'} ) - {'image'}
lowerCamelCase__ : Optional[int] = PipelineTesterMixin.required_optional_params - {'latents'}
lowerCamelCase__ : Dict = False
# No `output_type`.
lowerCamelCase__ : str = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=3_2 , attention_head_dim=4 , )
SCREAMING_SNAKE_CASE__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
SCREAMING_SNAKE_CASE__ = CLIPTextModel(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any=0 ) -> Optional[Any]:
# 3 frames
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 3, 3_2, 3_2) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
if str(_UpperCAmelCase ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(_UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''video''': video,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = VideoToVideoSDPipeline(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = '''np'''
SCREAMING_SNAKE_CASE__ = sd_pipe(**_UpperCAmelCase ).frames
SCREAMING_SNAKE_CASE__ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (3_2, 3_2, 3)
SCREAMING_SNAKE_CASE__ = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_UpperCAmelCase , expected_max_diff=5e-3 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
pass
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
SCREAMING_SNAKE_CASE__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6) , generator=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = video.to("""cuda""" )
SCREAMING_SNAKE_CASE__ = '''Spiderman is surfing'''
SCREAMING_SNAKE_CASE__ = pipe(_UpperCAmelCase , video=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=3 , output_type="""pt""" ).frames
SCREAMING_SNAKE_CASE__ = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The split sizes don't match the expected split sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: int) -> bool:
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
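# --- Illustrative behaviour (a sketch, not part of the original module) ---
# verify_checksums compares the per-URL records as whole dicts, so a changed
# checksum (or size) raises NonMatchingChecksumError:
#
#     expected = {"url/a": {"num_bytes": 10, "checksum": "abc"}}
#     verify_checksums(expected, {"url/a": {"num_bytes": 10, "checksum": "abc"}})  # ok, logs success
#     verify_checksums(expected, {"url/a": {"num_bytes": 10, "checksum": "xyz"}})  # raises NonMatchingChecksumError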
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class lowercase :
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=False , lowercase=True , lowercase="None" , lowercase=3 , lowercase=4 , lowercase=None , ) -> List[str]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = relative_attention
lowerCAmelCase = position_biased_input
lowerCAmelCase = pos_att_type
lowerCAmelCase = scope
def _snake_case ( self ) -> str:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=lowercase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
lowerCAmelCase = TFDebertaVaModel(config=lowercase )
lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
lowerCAmelCase = TFDebertaVaForMaskedLM(config=lowercase )
lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict:
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFDebertaVaForSequenceClassification(config=lowercase )
lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]:
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFDebertaVaForTokenClassification(config=lowercase )
lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict:
lowerCAmelCase = TFDebertaVaForQuestionAnswering(config=lowercase )
lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) = config_and_inputs
lowerCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{
'feature-extraction': TFDebertaVaModel,
'fill-mask': TFDebertaVaForMaskedLM,
'question-answering': TFDebertaVaForQuestionAnswering,
'text-classification': TFDebertaVaForSequenceClassification,
'token-classification': TFDebertaVaForTokenClassification,
'zero-shot': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = TFDebertaVaModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def _snake_case ( self ) -> List[str]:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase )
def _snake_case ( self ) -> Dict:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase )
def _snake_case ( self ) -> Dict:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
@slow
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
self.assertIsNotNone(lowercase )
@require_tf
class lowercase ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def _snake_case ( self ) -> Tuple:
pass
@slow
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
lowerCAmelCase = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
lowerCAmelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowerCAmelCase = model(lowercase , attention_mask=lowercase )[0]
lowerCAmelCase = tf.constant(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , lowercase , atol=1e-4 )
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = KandinskyVaaInpaintPipeline
_SCREAMING_SNAKE_CASE = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
_SCREAMING_SNAKE_CASE = [
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
_SCREAMING_SNAKE_CASE = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_SCREAMING_SNAKE_CASE = False
@property
def _snake_case ( self ) -> Optional[int]:
return 32
@property
def _snake_case ( self ) -> Tuple:
return 32
@property
def _snake_case ( self ) -> Any:
return self.time_input_dim
@property
def _snake_case ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def _snake_case ( self ) -> Union[str, Any]:
return 100
@property
def _snake_case ( self ) -> Tuple:
torch.manual_seed(0 )
lowerCAmelCase = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowerCAmelCase = UNetaDConditionModel(**lowercase )
return model
@property
def _snake_case ( self ) -> Any:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _snake_case ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.dummy_unet
lowerCAmelCase = self.dummy_movq
lowerCAmelCase = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=lowercase , set_alpha_to_one=lowercase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=lowercase , )
lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _snake_case ( self , lowercase , lowercase=0 ) -> Dict:
lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase ) ).to(lowercase )
lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowercase )
# create init_image
lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase ) ).to(lowercase )
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase = Image.fromarray(np.uinta(lowercase ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
lowerCAmelCase = np.ones((64, 64) , dtype=np.floataa )
lowerCAmelCase = 0
if str(lowercase ).startswith("""mps""" ):
lowerCAmelCase = torch.manual_seed(lowercase )
else:
lowerCAmelCase = torch.Generator(device=lowercase ).manual_seed(lowercase )
lowerCAmelCase = {
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = """cpu"""
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**lowercase )
lowerCAmelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
lowerCAmelCase = pipe(**self.get_dummy_inputs(lowercase ) )
lowerCAmelCase = output.images
lowerCAmelCase = pipe(
**self.get_dummy_inputs(lowercase ) , return_dict=lowercase , )[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase = np.array(
[0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def _snake_case ( self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def _snake_case ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowerCAmelCase = np.ones((768, 768) , dtype=np.floataa )
lowerCAmelCase = 0
lowerCAmelCase = """a hat"""
lowerCAmelCase = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(lowercase )
lowerCAmelCase = KandinskyVaaInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
lowerCAmelCase = pipeline.to(lowercase )
pipeline.set_progress_bar_config(disable=lowercase )
lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase , lowerCAmelCase = pipe_prior(
lowercase , generator=lowercase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowerCAmelCase = pipeline(
image=lowercase , mask_image=lowercase , image_embeds=lowercase , negative_image_embeds=lowercase , generator=lowercase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
lowerCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase , lowercase )
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian (normal) probability density at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
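# Sanity check (added for clarity, not in the original file): the density
# peaks at x == mu, where it equals 1 / sqrt(2 * pi * sigma**2), about
# 0.39894 for the default mu=0.0, sigma=1.0.
#
#     >>> float(round(gaussian(0.0), 5))
#     0.39894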
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase : int = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35_389, 6_672, 49, 2]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__SCREAMING_SNAKE_CASE = {"""input_ids""": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name="""microsoft/xprophetnet-large-wiki100-cased""" , revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
| 627 | 1 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")
def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
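

def _rsa_round_trip_demo() -> bool:
    """Illustrative only (added): textbook-sized numbers, not produced by
    generate_key(). With p = 61 and q = 53, n = 3233 and phi = 3120; e = 17
    is coprime to phi and d = 2753 is its modular inverse, so decrypting an
    encryption is the identity on any message below n."""
    n, e, d = 3233, 17, 2753
    message = 42
    return pow(pow(message, e, n), d, n) == message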
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program.")
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
| 425 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 425 | 1 |
"""simple docstring"""
def binary_multiply(a: int, b: int) -> int:
    """Multiply two non-negative integers with the Russian-peasant
    (shift-and-add) method: add ``a`` whenever the low bit of ``b`` is set."""
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Shift-and-add multiplication, reducing modulo ``modulus`` at each step
    so intermediate values stay small."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus

        a += a
        b >>= 1

    return res
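

if __name__ == "__main__":
    # Illustrative checks (added): both routines agree with the * operator.
    assert binary_multiply(13, 7) == 13 * 7
    assert binary_mod_multiply(13, 7, 5) == (13 * 7) % 5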
| 409 |
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"""^(?P<major>\d+)""" r"""\.(?P<minor>\d+)""" r"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class Version:
    """Dataclass wrapping a ``major.minor.patch`` version string with ordering."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f'{other} (type {type(other)}) cannot be compared to version.')

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) integer tuple encoded in ``version_str``."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("""major"""), res.group("""minor"""), res.group("""patch""")])


def _version_tuple_to_str(version_tuple):
    """Join an integer version tuple back into a dotted string."""
    return ".".join(str(v) for v in version_tuple)
| 409 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        # IPNDM is a fourth-order multistep method, so seed four past residuals.
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (the obfuscated source dropped the
            # attribute target; ``scheduler.ets`` follows the upstream test)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
| 256 |
"""simple docstring"""
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")
def encrypt_message(key: int, message: str) -> str:
    """Encrypt ``message`` by reading it off in ``key`` columns."""
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def decrypt_message(key: int, message: str) -> str:
    """Reverse encrypt_message by writing the ciphertext back into the grid."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 256 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    # NOTE: the names of the two boolean flags were lost in obfuscation; these
    # follow the upstream diffusers test and are an assumption here.
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 72 |
from math import factorial
def solution(base: int = 20) -> int:
    """Count the lattice paths through a ``base`` x ``base`` grid; the answer
    is the central binomial coefficient C(2 * base, base)."""
    n = 2 * base  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2

    return int(factorial(n) / (factorial(k) * factorial(n - k)))
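

# Illustrative check (added): a 2x2 grid has C(4, 2) = 6 routes from the
# top-left to the bottom-right corner, the worked example from the problem.
assert solution(2) == 6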
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number.')
| 35 | 0 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 114 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Here we also overwrite some tests of test_modeling_tf_common.py, as RegNet
    does not use input_ids, inputs_embeds, attention_mask and seq_length."""

    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 114 | 1 |
'''simple docstring'''
import random
def random_graph(nodes_number: int, probability: float, directed: bool = False) -> dict:
    """Build a random graph on ``nodes_number`` nodes as an adjacency-list dict.

    Each possible edge is included independently with the given probability
    (an Erdos-Renyi G(n, p) graph)."""
    graph: dict = {i: [] for i in range(nodes_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(nodes_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is smaller than probability
    for i in range(nodes_number):
        for j in range(i + 1, nodes_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add the edge from j to i
                    graph[j].append(i)
    return graph


def complete_graph(nodes_number: int) -> dict:
    """Return the complete graph on ``nodes_number`` nodes as an adjacency-list dict."""
    return {
        i: [j for j in range(nodes_number) if i != j] for i in range(nodes_number)
    }
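

def _expected_density_demo(nodes_number: int = 200, probability: float = 0.3) -> float:
    """Illustrative only (added): each of the n * (n - 1) / 2 possible edges of
    an undirected G(n, p) graph is included independently with probability p,
    so the observed edge density returned here should be close to p."""
    graph = random_graph(nodes_number, probability)
    edges = sum(len(neighbours) for neighbours in graph.values()) / 2
    return edges / (nodes_number * (nodes_number - 1) / 2)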
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 466 |
'''simple docstring'''
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Return True if ``pattern`` occurs in ``text`` (Knuth-Morris-Pratt, O(n + m))."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Calculate the new index we should go to if we fail a comparison."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
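

# Illustrative trace (added): for "aabaabaaa" the failure array is
# [0, 1, 0, 1, 2, 3, 4, 5, 2]; failure[i] is the length of the longest proper
# prefix of pattern[: i + 1] that is also its suffix, which is what lets kmp()
# resume matching without re-scanning the text.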
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 466 | 1 |
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings, appending any leftover tail."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 710 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 591 | 0 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 72 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/ROUGE_(metric)',
                'https://github.com/google-research/google-research/tree/master/rouge',
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 72 | 1 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Parse the input file into a dict mapping each node to its
    [neighbour, distance] pairs (edges are stored in both directions)."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])

    return dict_of_neighbours
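

# Illustrative input format (added; hypothetical file contents). Each line is
# one weighted, undirected edge "u v distance", and the first character of the
# file names the start node, e.g.:
#
#     a b 20
#     a c 18
#     b c 10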
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour to use as the starting solution."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []

    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Return all 2-swap neighbours of ``solution``, each with its total
    distance appended as the last element, sorted by that distance."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0

            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Iteratively move to the best non-tabu 2-swap neighbour, keeping the
    swapped node pair on a tabu list of at most ``size`` entries."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None) -> None:
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Tabu Search')
    parser.add_argument(
        '-f',
        '--File',
        type=str,
        help='Path to the file containing the data',
        required=True,
    )
    parser.add_argument(
        '-i',
        '--Iterations',
        type=int,
        help='How many iterations the algorithm should perform',
        required=True,
    )
    parser.add_argument(
        '-s', '--Size', type=int, help='Size of the tabu list', required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
| 605 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx: int) -> list:
    '''Collect (HF name, original name) rename pairs for the stage-``idx`` patch
    embedding. NOTE: the function name is a reconstruction; the original was
    obfuscated.'''
    embed = []
    embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
f"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
f"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
f"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
f"stage{idx}.patch_embed.norm.bias",
) )
return embed
def attention(idx: int, cnt: int) -> list:
    '''Collect (HF name, original name) rename pairs for attention block ``cnt``
    of stage ``idx``. NOTE: the function name is a reconstruction; the original
    was obfuscated.'''
    attention_weights = []
    attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
f"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
f"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", f"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", f"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", f"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", f"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def snake_case__ ( _A: Dict ) -> Dict:
'''simple docstring'''
lowerCAmelCase = []
token.append((f"cvt.encoder.stages.{idx}.cls_token", """stage2.cls_token""") )
return token
def snake_case__ ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def snake_case__ ( _A: Union[str, Any] , _A: str , _A: List[Any] , _A: List[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase = """imagenet-1k-id2label.json"""
lowerCAmelCase = 1000
lowerCAmelCase = """huggingface/label-files"""
lowerCAmelCase = num_labels
lowerCAmelCase = json.load(open(cached_download(hf_hub_url(_A , _A , repo_type="""dataset""" ) ) , """r""" ) )
lowerCAmelCase = {int(_A ): v for k, v in idalabel.items()}
lowerCAmelCase = idalabel
lowerCAmelCase = {v: k for k, v in idalabel.items()}
    lowerCAmelCase = CvtConfig(num_labels=_A , idalabel=_A , labelaid=_A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
lowerCAmelCase = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
lowerCAmelCase = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowerCAmelCase = [2, 2, 20]
lowerCAmelCase = [3, 12, 16]
lowerCAmelCase = [192, 768, 1024]
lowerCAmelCase = CvtForImageClassification(_A )
lowerCAmelCase = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
lowerCAmelCase = image_size
lowerCAmelCase = torch.load(_A , map_location=torch.device("""cpu""" ) )
lowerCAmelCase = OrderedDict()
lowerCAmelCase = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowerCAmelCase = list_of_state_dict + cls_token(_A )
lowerCAmelCase = list_of_state_dict + embeddings(_A )
for cnt in range(config.depth[idx] ):
lowerCAmelCase = list_of_state_dict + attention(_A , _A )
lowerCAmelCase = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_A )
for i in range(len(_A ) ):
lowerCAmelCase = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_A )
model.save_pretrained(_A )
image_processor.save_pretrained(_A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=3_8_4,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
        help='''Path to the original CvT checkpoint file''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowercase = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
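    # Hypothetical invocation of this conversion script (the script and checkpoint file
    # names below are placeholders, not taken from the original repository):
    #
    #     python convert_cvt_checkpoint.py --cvt_model cvt-13 --image_size 384 \
    #         --cvt_file_name ./CvT-13-384x384-IN-1k.pth --pytorch_dump_folder_path ./cvt-13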
| 605 | 1 |
"""simple docstring"""
from __future__ import annotations
import time
__A = list[tuple[int, int]]
__A = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0s are free paths whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__A = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = pos_x
lowerCAmelCase__ :str = pos_y
lowerCAmelCase__ :int = (pos_y, pos_x)
lowerCAmelCase__ :List[Any] = goal_x
lowerCAmelCase__ :List[Any] = goal_y
lowerCAmelCase__ :List[Any] = parent
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = Node(start[1] , start[0] , goal[1] , goal[0] , __UpperCAmelCase )
lowerCAmelCase__ :Any = Node(goal[1] , goal[0] , goal[1] , goal[0] , __UpperCAmelCase )
lowerCAmelCase__ :Any = [self.start]
lowerCAmelCase__ :Optional[Any] = False
def snake_case ( self ):
'''simple docstring'''
while self.node_queue:
lowerCAmelCase__ :List[Any] = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
lowerCAmelCase__ :Dict = True
return self.retrace_path(__UpperCAmelCase )
lowerCAmelCase__ :Dict = self.get_successors(__UpperCAmelCase )
for node in successors:
self.node_queue.append(__UpperCAmelCase )
if not self.reached:
return [self.start.pos]
return None
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = []
for action in delta:
lowerCAmelCase__ :str = parent.pos_x + action[1]
lowerCAmelCase__ :Optional[int] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__UpperCAmelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(__UpperCAmelCase , __UpperCAmelCase , self.target.pos_y , self.target.pos_x , __UpperCAmelCase ) )
return successors
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = node
lowerCAmelCase__ :int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowerCAmelCase__ :Any = current_node.parent
path.reverse()
return path
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :int = BreadthFirstSearch(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = BreadthFirstSearch(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = False
def snake_case ( self ):
'''simple docstring'''
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
lowerCAmelCase__ :List[Any] = self.fwd_bfs.node_queue.pop(0 )
lowerCAmelCase__ :List[str] = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
lowerCAmelCase__ :Dict = True
return self.retrace_bidirectional_path(
__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Tuple = current_bwd_node
lowerCAmelCase__ :int = current_fwd_node
lowerCAmelCase__ :Any = {
self.fwd_bfs: self.fwd_bfs.get_successors(__UpperCAmelCase ),
self.bwd_bfs: self.bwd_bfs.get_successors(__UpperCAmelCase ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(__UpperCAmelCase )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Dict = self.fwd_bfs.retrace_path(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = self.bwd_bfs.retrace_path(__UpperCAmelCase )
bwd_path.pop()
bwd_path.reverse()
lowerCAmelCase__ :List[Any] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
__A = (0, 0)
__A = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__A = time.time()
__A = BreadthFirstSearch(init, goal)
__A = bfs.search()
__A = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
__A = time.time()
__A = BidirectionalBreadthFirstSearch(init, goal)
__A = bd_bfs.search()
__A = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 93 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class _A ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Tuple = 'ctrl'
lowerCamelCase : Any = ['past_key_values']
lowerCamelCase : Optional[int] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=246534 , __SCREAMING_SNAKE_CASE : int=256 , __SCREAMING_SNAKE_CASE : Optional[Any]=1280 , __SCREAMING_SNAKE_CASE : Optional[Any]=8192 , __SCREAMING_SNAKE_CASE : int=48 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=1e-6 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , **__SCREAMING_SNAKE_CASE : int , ) -> Any:
__UpperCAmelCase =vocab_size
__UpperCAmelCase =n_positions
__UpperCAmelCase =n_embd
__UpperCAmelCase =n_layer
__UpperCAmelCase =n_head
__UpperCAmelCase =dff
__UpperCAmelCase =resid_pdrop
__UpperCAmelCase =embd_pdrop
__UpperCAmelCase =layer_norm_epsilon
__UpperCAmelCase =initializer_range
__UpperCAmelCase =use_cache
super().__init__(**__SCREAMING_SNAKE_CASE )
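# Minimal usage sketch for the config class above. This is a sketch only: it assumes the
# standard `transformers` CTRL classes, and the hyperparameter values are illustrative:
#
#     from transformers import CTRLConfig, CTRLModel
#     config = CTRLConfig(n_layer=2, n_head=4, n_embd=128)
#     model = CTRLModel(config)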
| 68 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def __init__( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use CLIPImageProcessor instead." , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
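# Migration sketch for the deprecation above (the checkpoint name is illustrative, not
# mandated by this module):
#
#     from transformers import CLIPImageProcessor
#     image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")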
| 721 |
"""simple docstring"""
import argparse
import os
import re
lowerCAmelCase__ = '''src/transformers'''
# Pattern that looks at the indentation in a line.
lowerCAmelCase__ = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase__ = re.compile(r'''\[([^\]]+)\]''')
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = _re_indent.search(SCREAMING_SNAKE_CASE )
return "" if search is None else search.groups()[0]
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int]="" , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Dict=None ):
'''simple docstring'''
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Optional[int] = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(SCREAMING_SNAKE_CASE ):
index += 1
lowerCAmelCase : Dict = ["\n".join(lines[:index] )]
else:
lowerCAmelCase : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(SCREAMING_SNAKE_CASE ) and (end_prompt is None or not lines[index].startswith(SCREAMING_SNAKE_CASE )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(SCREAMING_SNAKE_CASE ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
if index < len(SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase : List[str] = [lines[index + 1]]
index += 1
else:
lowerCAmelCase : Optional[Any] = []
else:
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
lowerCAmelCase : str = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(SCREAMING_SNAKE_CASE ) > 0:
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(SCREAMING_SNAKE_CASE ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def _inner(SCREAMING_SNAKE_CASE : Optional[Any] ):
return key(SCREAMING_SNAKE_CASE ).lower().replace("_" , "" )
return _inner
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=None ):
'''simple docstring'''
def noop(SCREAMING_SNAKE_CASE : List[Any] ):
return x
if key is None:
lowerCAmelCase : int = noop
# Constants are all uppercase, they go first.
lowerCAmelCase : Dict = [obj for obj in objects if key(SCREAMING_SNAKE_CASE ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCAmelCase : List[Any] = [obj for obj in objects if key(SCREAMING_SNAKE_CASE )[0].isupper() and not key(SCREAMING_SNAKE_CASE ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCAmelCase : List[Any] = [obj for obj in objects if not key(SCREAMING_SNAKE_CASE )[0].isupper()]
lowerCAmelCase : Dict = ignore_underscore(SCREAMING_SNAKE_CASE )
return sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE ) + sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE ) + sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE )
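# Worked example of the grouping above (constants first, then classes, then functions;
# underscores are ignored when ordering inside each group). The input values are
# illustrative:
#
#     sort_objects(["foo", "BAR_CONST", "Baz", "_private"])
#     # -> ["BAR_CONST", "Baz", "foo", "_private"]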
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
def _replace(SCREAMING_SNAKE_CASE : List[Any] ):
lowerCAmelCase : List[str] = match.groups()[0]
if "," not in imports:
return f"""[{imports}]"""
lowerCAmelCase : Dict = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : Any = keys[:-1]
return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE )] ) + "]"
lowerCAmelCase : List[Any] = import_statement.split("\n" )
if len(SCREAMING_SNAKE_CASE ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase : Tuple = 2 if lines[1].strip() == "[" else 1
lowerCAmelCase : Optional[Any] = [(i, _re_strip_line.search(SCREAMING_SNAKE_CASE ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCAmelCase : Optional[Any] = sort_objects(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[1] )
lowerCAmelCase : List[str] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(SCREAMING_SNAKE_CASE ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase : Optional[int] = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase : List[str] = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : Union[str, Any] = keys[:-1]
lowerCAmelCase : str = get_indent(lines[1] ) + ", ".join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE )] )
return "\n".join(SCREAMING_SNAKE_CASE )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase : Any = _re_bracket_content.sub(_replace , SCREAMING_SNAKE_CASE )
return import_statement
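# Illustrative before/after for the single-line case handled above:
#
#     sort_objects_in_import('_import_structure["models.bert"] = ["BertModel", "BertConfig"]')
#     # -> '_import_structure["models.bert"] = ["BertConfig", "BertModel"]'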
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=True ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , encoding="utf-8" ) as f:
lowerCAmelCase : Union[str, Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase : List[str] = split_code_in_indented_blocks(
SCREAMING_SNAKE_CASE , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(SCREAMING_SNAKE_CASE ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCAmelCase : Tuple = main_blocks[block_idx]
lowerCAmelCase : Optional[Any] = block.split("\n" )
# Get to the start of the imports.
lowerCAmelCase : int = 0
while line_idx < len(SCREAMING_SNAKE_CASE ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
else:
line_idx += 1
if line_idx >= len(SCREAMING_SNAKE_CASE ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase : Optional[Any] = "\n".join(block_lines[line_idx:-1] )
lowerCAmelCase : Dict = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(SCREAMING_SNAKE_CASE , indent_level=SCREAMING_SNAKE_CASE )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase : Tuple = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCAmelCase : Tuple = [(pattern.search(SCREAMING_SNAKE_CASE ).groups()[0] if pattern.search(SCREAMING_SNAKE_CASE ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase : int = [(i, key) for i, key in enumerate(SCREAMING_SNAKE_CASE ) if key is not None]
lowerCAmelCase : Union[str, Any] = [x[0] for x in sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Any = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase : Dict = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(SCREAMING_SNAKE_CASE )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase : List[Any] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(SCREAMING_SNAKE_CASE ):
if check_only:
return True
else:
print(f"""Overwriting {file}.""" )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write("\n".join(SCREAMING_SNAKE_CASE ) )
def a__ ( SCREAMING_SNAKE_CASE : List[str]=True ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
lowerCAmelCase : Tuple = sort_imports(os.path.join(SCREAMING_SNAKE_CASE , "__init__.py" ) , check_only=SCREAMING_SNAKE_CASE )
if result:
lowerCAmelCase : Optional[Any] = [os.path.join(SCREAMING_SNAKE_CASE , "__init__.py" )]
if len(SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(f"""Would overwrite {len(SCREAMING_SNAKE_CASE )} files, run `make style`.""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowerCAmelCase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
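    # Hypothetical invocation (the script file name is a placeholder):
    #
    #     python custom_init_isort.py               # rewrite __init__.py files in place
    #     python custom_init_isort.py --check_only  # only report, via the ValueError above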
| 681 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : Tuple = ["""pixel_values"""]
def __init__( self , snake_case = True , snake_case = None , snake_case = PILImageResampling.BICUBIC , snake_case = True , snake_case = None , snake_case = True , snake_case = 1 / 255 , snake_case = True , snake_case = None , snake_case = None , snake_case = True , **snake_case , ) -> None:
"""simple docstring"""
super().__init__(**snake_case_ )
a__ : List[str] = size if size is not None else {"shortest_edge": 224}
a__ : List[Any] = get_size_dict(snake_case_ , default_to_square=snake_case_ )
a__ : Optional[Any] = crop_size if crop_size is not None else {"height": 224, "width": 224}
a__ : Dict = get_size_dict(snake_case_ , default_to_square=snake_case_ , param_name="crop_size" )
a__ : List[str] = do_resize
a__ : Optional[Any] = size
a__ : Tuple = resample
a__ : Tuple = do_center_crop
a__ : Dict = crop_size
a__ : Optional[int] = do_rescale
a__ : Tuple = rescale_factor
a__ : int = do_normalize
a__ : Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
a__ : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD
a__ : Tuple = do_convert_rgb
def _snake_case ( self , snake_case , snake_case , snake_case = PILImageResampling.BICUBIC , snake_case = None , **snake_case , ) -> np.ndarray:
"""simple docstring"""
a__ : List[Any] = get_size_dict(snake_case_ , default_to_square=snake_case_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
a__ : List[Any] = get_resize_output_image_size(snake_case_ , size=size["shortest_edge"] , default_to_square=snake_case_ )
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def _snake_case ( self , snake_case , snake_case , snake_case = None , **snake_case , ) -> np.ndarray:
"""simple docstring"""
a__ : Tuple = get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(snake_case_ , size=(size["height"], size["width"]) , data_format=snake_case_ , **snake_case_ )
def _snake_case ( self , snake_case , snake_case , snake_case = None , **snake_case , ) -> str:
"""simple docstring"""
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def _snake_case ( self , snake_case , snake_case , snake_case , snake_case = None , **snake_case , ) -> np.ndarray:
"""simple docstring"""
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
def _snake_case ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ) -> PIL.Image.Image:
"""simple docstring"""
a__ : List[Any] = do_resize if do_resize is not None else self.do_resize
a__ : List[Any] = size if size is not None else self.size
a__ : Union[str, Any] = get_size_dict(snake_case_ , param_name="size" , default_to_square=snake_case_ )
a__ : List[str] = resample if resample is not None else self.resample
a__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
a__ : List[Any] = crop_size if crop_size is not None else self.crop_size
a__ : Any = get_size_dict(snake_case_ , param_name="crop_size" , default_to_square=snake_case_ )
a__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
a__ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
a__ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
a__ : int = image_mean if image_mean is not None else self.image_mean
a__ : str = image_std if image_std is not None else self.image_std
a__ : Optional[int] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
a__ : str = make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
a__ : List[Any] = [convert_to_rgb(snake_case_ ) for image in images]
# All transformations expect numpy arrays.
a__ : Any = [to_numpy_array(snake_case_ ) for image in images]
if do_resize:
a__ : Any = [self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
if do_center_crop:
a__ : Dict = [self.center_crop(image=snake_case_ , size=snake_case_ ) for image in images]
if do_rescale:
a__ : str = [self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images]
if do_normalize:
a__ : int = [self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ ) for image in images]
a__ : Optional[Any] = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
a__ : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
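# Minimal usage sketch for the processor above. A sketch only: the checkpoint name and
# image file are illustrative, and the output shape assumes the default 224x224 crop
# configured above:
#
#     from PIL import Image
#     from transformers import CLIPImageProcessor
#     processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#     print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])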
| 112 |
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__snake_case : Dict = open # noqa: we just need to have a builtin inside this module to test it properly
| 131 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowercase : str = logging.get_logger(__name__)
__lowercase : Any = "▁"
__lowercase : str = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
}
__lowercase : Optional[int] = {
"vocab_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
),
},
"spm_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
)
},
}
__lowercase : int = {
"facebook/s2t-small-librispeech-asr": 1024,
}
__lowercase : Union[str, Any] = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
__lowercase : List[str] = {"mustc": MUSTC_LANGS}
class _A ( _UpperCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : str = VOCAB_FILES_NAMES
UpperCamelCase_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : List[Any] = MAX_MODEL_INPUT_SIZES
UpperCamelCase_ : Optional[Any] = ['''input_ids''', '''attention_mask''']
UpperCamelCase_ : List[int] = []
def __init__( self : List[str] , A_ : str , A_ : Tuple , A_ : Union[str, Any]="<s>" , A_ : Union[str, Any]="</s>" , A_ : Optional[int]="<pad>" , A_ : Union[str, Any]="<unk>" , A_ : List[str]=False , A_ : Union[str, Any]=False , A_ : Optional[int]=None , A_ : Tuple=None , A_ : Optional[Dict[str, Any]] = None , **A_ : Optional[int] , ) -> None:
__snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , pad_token=A_ , do_upper_case=A_ , do_lower_case=A_ , tgt_lang=A_ , lang_codes=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
__snake_case = do_upper_case
__snake_case = do_lower_case
__snake_case = load_json(A_ )
__snake_case = {v: k for k, v in self.encoder.items()}
__snake_case = spm_file
__snake_case = load_spm(A_ , self.sp_model_kwargs )
if lang_codes is not None:
__snake_case = lang_codes
__snake_case = LANGUAGES[lang_codes]
__snake_case = [f"<lang:{lang}>" for lang in self.langs]
__snake_case = {lang: self.sp_model.PieceToId(f"<lang:{lang}>" ) for lang in self.langs}
__snake_case = self.lang_tokens
__snake_case = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__snake_case = {}
@property
def lowercase ( self : Optional[int] ) -> int:
return len(self.encoder )
@property
def lowercase ( self : str ) -> str:
return self._tgt_lang
@tgt_lang.setter
def lowercase ( self : Tuple , A_ : Union[str, Any] ) -> None:
__snake_case = new_tgt_lang
self.set_tgt_lang_special_tokens(A_ )
def lowercase ( self : List[str] , A_ : str ) -> None:
__snake_case = self.lang_code_to_id[tgt_lang]
__snake_case = [lang_code_id]
def lowercase ( self : Dict , A_ : str ) -> List[str]:
return self.sp_model.encode(A_ , out_type=A_ )
def lowercase ( self : List[Any] , A_ : int ) -> List[Any]:
return self.encoder.get(A_ , self.encoder[self.unk_token] )
def lowercase ( self : Optional[int] , A_ : int ) -> str:
return self.decoder.get(A_ , self.unk_token )
def lowercase ( self : str , A_ : List[str] ) -> str:
__snake_case = []
__snake_case = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__snake_case = self.sp_model.decode(A_ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__snake_case = []
else:
current_sub_tokens.append(A_ )
__snake_case = self.sp_model.decode(A_ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowercase ( self : Any , A_ : List[str] , A_ : Tuple=None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase ( self : Dict , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
__snake_case = [1] * len(self.prefix_tokens )
__snake_case = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(A_ )) + suffix_ones
return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def lowercase ( self : Any ) -> Dict:
__snake_case = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ) -> Dict:
__snake_case = self.__dict__.copy()
__snake_case = None
return state
def __setstate__( self : Dict , A_ : Dict ) -> None:
__snake_case = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__snake_case = {}
__snake_case = load_spm(self.spm_file , self.sp_model_kwargs )
def lowercase ( self : int , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
__snake_case = Path(A_ )
assert save_dir.is_dir(), f"{save_directory} should be a directory"
__snake_case = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
__snake_case = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , A_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(A_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , A_ )
elif not os.path.isfile(self.spm_file ):
with open(A_ , '''wb''' ) as fi:
__snake_case = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (str(A_ ), str(A_ ))
def SCREAMING_SNAKE_CASE ( snake_case, snake_case):
__snake_case = sentencepiece.SentencePieceProcessor(**snake_case)
spm.Load(str(snake_case))
return spm
def SCREAMING_SNAKE_CASE ( snake_case):
with open(snake_case, '''r''') as f:
return json.load(snake_case)
def SCREAMING_SNAKE_CASE ( snake_case, snake_case):
with open(snake_case, '''w''') as f:
        json.dump(snake_case, snake_case, indent=2)
| 721 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def SCREAMING_SNAKE_CASE ( snake_case, snake_case = True, snake_case = math.inf, snake_case = -math.inf, snake_case = math.inf, snake_case = -math.inf, snake_case = False, snake_case = 1_00, snake_case = 0.01, snake_case = 1, ):
__snake_case = False
__snake_case = search_prob
__snake_case = start_temperate
__snake_case = []
__snake_case = 0
__snake_case = None
while not search_end:
__snake_case = current_state.score()
if best_state is None or current_score > best_state.score():
__snake_case = current_state
scores.append(snake_case)
iterations += 1
__snake_case = None
__snake_case = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
__snake_case = random.randint(0, len(snake_case) - 1) # picking a random neighbor
__snake_case = neighbors.pop(snake_case)
__snake_case = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
__snake_case = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
__snake_case = picked_neighbor
else:
__snake_case = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
__snake_case = picked_neighbor
__snake_case = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
__snake_case = True
else:
__snake_case = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(snake_case), snake_case)
plt.xlabel('''Iterations''')
plt.ylabel('''Function values''')
plt.show()
return best_state
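# Worked example of the Metropolis acceptance step above (the numbers are illustrative):
# with change = -2.0 (a worsening move, after any find_max sign flip) and
# current_temp = 10.0, the acceptance probability is e ** (-2.0 / 10.0), roughly 0.819,
# so about 82% of such moves are still accepted at this temperature.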
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( snake_case, snake_case):
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
__lowercase : Dict = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__lowercase : Union[str, Any] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
__lowercase : Any = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__lowercase : int = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def SCREAMING_SNAKE_CASE ( snake_case, snake_case):
return (3 * x**2) - (6 * y)
__lowercase : List[str] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__lowercase : Dict = simulated_annealing(prob, find_max=False, visualization=True)
print(
"The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
F"""{local_min.score()}"""
)
__lowercase : Any = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__lowercase : Tuple = simulated_annealing(prob, find_max=True, visualization=True)
print(
"The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
F"""{local_min.score()}"""
)
| 93 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = {
'''vocab_file''': '''vocab.json''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
'''merges_file''': '''merges.txt''',
}
__lowerCamelCase : Optional[int] = {
'''vocab_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
),
},
'''tokenizer_config_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
),
},
'''merges_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
),
},
}
__lowerCamelCase : str = '''</w>'''
__lowerCamelCase : List[Any] = '''@@ '''
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = set()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = char
return pairs
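# Worked example (the input is illustrative): get_pairs("low") -> {("l", "o"), ("o", "w")},
# i.e. every pair of adjacent symbols in the word, which is what the BPE loop further
# below ranks against self.bpe_ranks.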
# Speech2Text2 has no max input length
__lowerCamelCase : Optional[Any] = {'''facebook/s2t-wav2vec2-large-en-de''': 10_24}
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['input_ids', 'attention_mask']
def __init__( self : int,_A : Union[str, Any],_A : Optional[Any]="<s>",_A : Union[str, Any]="<pad>",_A : Optional[Any]="</s>",_A : Dict="<unk>",_A : str=False,_A : Optional[int]=None,**_A : List[Any],):
"""simple docstring"""
super().__init__(
unk_token=_A,bos_token=_A,eos_token=_A,pad_token=_A,do_lower_case=_A,**_A,)
SCREAMING_SNAKE_CASE_ : Optional[int] = do_lower_case
with open(_A,encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE_ : int = json.load(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
            logger.info(F'No merges file provided. {self.__class__.__name__} can only be used for decoding.' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Tuple = None
else:
with open(_A,encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE_ : int = merges_handle.read().split("\n" )[:-1]
SCREAMING_SNAKE_CASE_ : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(zip(_A,range(len(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Dict = {}
@property
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return len(self.decoder )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder,**self.added_tokens_encoder )
def __UpperCamelCase ( self : Any,_A : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_ : Any = get_pairs(_A )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_ : List[str] = min(_A,key=lambda _A : self.bpe_ranks.get(_A,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = bigram
SCREAMING_SNAKE_CASE_ : str = []
SCREAMING_SNAKE_CASE_ : List[str] = 0
while i < len(_A ):
try:
SCREAMING_SNAKE_CASE_ : Any = word.index(_A,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE_ : Optional[int] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE_ : List[Any] = tuple(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = new_word
if len(_A ) == 1:
break
else:
SCREAMING_SNAKE_CASE_ : Any = get_pairs(_A )
SCREAMING_SNAKE_CASE_ : int = " ".join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
SCREAMING_SNAKE_CASE_ : Dict = "\n" + BPE_TOKEN_MERGES
if word.endswith(_A ):
SCREAMING_SNAKE_CASE_ : List[str] = word.replace(_A,"" )
SCREAMING_SNAKE_CASE_ : Tuple = word.replace(" ",_A )
SCREAMING_SNAKE_CASE_ : List[str] = word
return word
def __UpperCamelCase ( self : Optional[int],_A : str ):
"""simple docstring"""
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding." )
if self.do_lower_case:
SCREAMING_SNAKE_CASE_ : Dict = text.lower()
SCREAMING_SNAKE_CASE_ : str = text.split()
SCREAMING_SNAKE_CASE_ : Tuple = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(" " ) ) )
return split_tokens
def __UpperCamelCase ( self : List[Any],_A : str ):
"""simple docstring"""
return self.encoder.get(_A,self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self : Union[str, Any],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.decoder.get(_A,self.unk_token )
return result
def __UpperCamelCase ( self : Optional[int],_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = " ".join(_A )
# make sure @@ tokens are concatenated
SCREAMING_SNAKE_CASE_ : Optional[int] = "".join(string.split(_A ) )
return string
def __UpperCamelCase ( self : Tuple,_A : str,_A : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_A,"w",encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder,indent=2,sort_keys=_A,ensure_ascii=_A ) + "\n" )
SCREAMING_SNAKE_CASE_ : Tuple = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A,"w",encoding="utf-8" ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(),key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE_ : List[Any] = token_index
writer.write(" ".join(_A ) + "\n" )
index += 1
return (vocab_file, merges_file)
| 216 |
class a__ :
def __init__( self : int,_A : Union[str, Any],_A : Dict,_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : int = None
SCREAMING_SNAKE_CASE_ : str = graph
self._normalize_graph(_A,_A )
SCREAMING_SNAKE_CASE_ : Tuple = len(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
def __UpperCamelCase ( self : Any,_A : str,_A : str ):
"""simple docstring"""
if sources is int:
SCREAMING_SNAKE_CASE_ : Dict = [sources]
if sinks is int:
SCREAMING_SNAKE_CASE_ : Optional[int] = [sinks]
if len(_A ) == 0 or len(_A ) == 0:
return
SCREAMING_SNAKE_CASE_ : Dict = sources[0]
SCREAMING_SNAKE_CASE_ : Dict = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(_A ) > 1 or len(_A ) > 1:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(self.graph ) + 1
for room in self.graph:
room.insert(0,0 )
self.graph.insert(0,[0] * size )
for i in sources:
SCREAMING_SNAKE_CASE_ : List[str] = max_input_flow
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : Dict = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
SCREAMING_SNAKE_CASE_ : str = max_input_flow
SCREAMING_SNAKE_CASE_ : str = size - 1
def __UpperCamelCase ( self : str ):
"""simple docstring"""
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __UpperCamelCase ( self : Union[str, Any],_A : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = algorithm(self )
class a__ :
def __init__( self : List[str],_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = flow_network
SCREAMING_SNAKE_CASE_ : str = flow_network.verticesCount
SCREAMING_SNAKE_CASE_ : Dict = flow_network.sourceIndex
SCREAMING_SNAKE_CASE_ : Any = flow_network.sinkIndex
        # this is just a reference, so you shouldn't change it
        # in your algorithms; use a deep copy before doing that
SCREAMING_SNAKE_CASE_ : Optional[int] = flow_network.graph
SCREAMING_SNAKE_CASE_ : List[Any] = False
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if not self.executed:
self._algorithm()
SCREAMING_SNAKE_CASE_ : Dict = True
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
pass
class a__ ( A__ ):
def __init__( self : Tuple,_A : Union[str, Any] ):
"""simple docstring"""
super().__init__(_A )
# use this to save your result
SCREAMING_SNAKE_CASE_ : int = -1
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class a__ ( A__ ):
def __init__( self : Optional[Any],_A : Optional[int] ):
"""simple docstring"""
super().__init__(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = [[0] * self.verticies_count for i in range(self.verticies_count )]
SCREAMING_SNAKE_CASE_ : int = [0] * self.verticies_count
SCREAMING_SNAKE_CASE_ : List[str] = [0] * self.verticies_count
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.verticies_count
        # push the initial preflow out of the source
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
SCREAMING_SNAKE_CASE_ : str = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
while i < len(_A ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = vertices_list[i]
SCREAMING_SNAKE_CASE_ : Any = self.heights[vertex_index]
self.process_vertex(_A )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0,vertices_list.pop(_A ) )
SCREAMING_SNAKE_CASE_ : str = 0
else:
i += 1
SCREAMING_SNAKE_CASE_ : List[Any] = sum(self.preflow[self.source_index] )
def __UpperCamelCase ( self : Dict,_A : Tuple ):
"""simple docstring"""
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_A,_A )
self.relabel(_A )
def __UpperCamelCase ( self : int,_A : Optional[Any],_A : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = min(
self.excesses[from_index],self.graph[from_index][to_index] - self.preflow[from_index][to_index],)
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __UpperCamelCase ( self : Tuple,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
SCREAMING_SNAKE_CASE_ : int = self.heights[to_index]
if min_height is not None:
SCREAMING_SNAKE_CASE_ : Dict = min_height + 1
if __name__ == "__main__":
__lowerCamelCase : str = [0]
__lowerCamelCase : str = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCamelCase : Any = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__lowerCamelCase : Dict = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCamelCase : Optional[Any] = flow_network.find_maximum_flow()
print(f'''maximum flow is {maximum_flow}''')
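    # For the 4-node demo graph above the only source-to-sink route is 0 -> 1 -> 2 -> 3
    # with capacities 7, 6 and 8, so this should print "maximum flow is 6" (the bottleneck).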
| 216 | 1 |
__A = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__A = [{"type": "code", "content": INSTALL_CONTENT}]
__A = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 167 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
if (
(cp >= 0x4_e00 and cp <= 0x9_fff)
or (cp >= 0x3_400 and cp <= 0x4_dbf) #
or (cp >= 0x20_000 and cp <= 0x2a_6df) #
or (cp >= 0x2a_700 and cp <= 0x2b_73f) #
or (cp >= 0x2b_740 and cp <= 0x2b_81f) #
or (cp >= 0x2b_820 and cp <= 0x2c_eaf) #
or (cp >= 0xf_900 and cp <= 0xf_aff)
or (cp >= 0x2f_800 and cp <= 0x2f_a1f) #
): #
return True
return False
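# Worked example: ord("中") == 0x4E2D lies in the 0x4_e00..0x9_fff block above, so
# _is_chinese_char(ord("中")) returns True, while _is_chinese_char(ord("a")) returns False.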
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> Dict:
"""simple docstring"""
for char in word:
__lowerCamelCase = ord(UpperCamelCase__ )
if not _is_chinese_char(UpperCamelCase__ ):
return 0
return 1
def lowerCamelCase_ ( UpperCamelCase__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowerCamelCase = set()
for token in tokens:
__lowerCamelCase = len(UpperCamelCase__ ) > 1 and is_chinese(UpperCamelCase__ )
if chinese_word:
word_set.add(UpperCamelCase__ )
__lowerCamelCase = list(UpperCamelCase__ )
return word_list
def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : set() ) -> int:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
__lowerCamelCase = max([len(UpperCamelCase__ ) for w in chinese_word_set] )
__lowerCamelCase = bert_tokens
__lowerCamelCase , __lowerCamelCase = 0, len(UpperCamelCase__ )
while start < end:
__lowerCamelCase = True
if is_chinese(bert_word[start] ):
__lowerCamelCase = min(end - start , UpperCamelCase__ )
for i in range(UpperCamelCase__ , 1 , -1 ):
__lowerCamelCase = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
__lowerCamelCase = '##' + bert_word[j]
__lowerCamelCase = start + i
__lowerCamelCase = False
break
if single_word:
start += 1
return bert_word
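# Worked example (the tokens are illustrative): with chinese_word_set = {"天气"},
# add_sub_symbol(["天", "气", "好"], {"天气"}) returns ["天", "##气", "好"]: the
# continuation character of a matched whole word is prefixed with "##".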
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """For each line, compute the positions of BERT sub-tokens that continue a Chinese whole word."""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords that start with ##, i.e. that are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiters like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
main(args)
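    # Output format note (illustrative): each line of the save_path file is a
    # JSON list of sub-token positions, e.g. "[3, 4, 7]"; a whole-word-masking
    # pretraining script can consume these lists as its Chinese reference column.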
| 167 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """Data collator that dynamically pads a batch of multiple-choice inputs."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
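# Shape sketch (illustrative): with batch_size=2 and num_choices=4,
# `flattened_features` holds 8 single-choice examples; after tokenizer.pad each
# tensor is (8, seq_len), and v.view(batch_size, num_choices, -1) restores a
# (2, 4, seq_len) tensor per key, which is what AutoModelForMultipleChoice expects.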
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , _lowerCamelCase , _lowerCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`.")
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
                f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""")
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
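    # Layout sketch (illustrative): each original example yields four
    # (context, ending) pairs, so after un-flattening every key maps to a list
    # of length-4 lists, e.g. input_ids[example_idx] -> 4 token sequences.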
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )
# Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 46 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings. Control and
    whitespace characters, which BPE merge files cannot store as-is, are remapped
    above 0x100.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word, where the word is
    represented as a tuple of symbols (variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
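# Quick illustration (hypothetical values): get_pairs(("h", "e", "l", "l", "o"))
# -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}; bytes_to_unicode() maps
# every byte 0-255 to a printable character, so BPE merge tables can be stored
# and diffed as plain text.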
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string into byte-level BPE tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
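# Usage sketch (file paths are illustrative; any GPT-2-style vocab/merges pair works):
# tokenizer = BartTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
# tokenizer.tokenize("Hello world")  # -> byte-level BPE tokens such as ["Hello", "Ġworld"]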
| 500 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 231 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class DistilBertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a DistilBERT model."""

    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
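# Usage sketch: attribute_map lets generic code read BERT-style names, e.g.
# config = DistilBertConfig(); config.hidden_size -> 768 (resolved to `dim`),
# config.num_hidden_layers -> 6 (resolved to `n_layers`).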
| 231 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 233 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
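# Lazy-import sketch: with this setup, `from transformers.models.blip_2 import
# Blip2Config` does not import modeling code eagerly; _LazyModule resolves the
# attribute via _import_structure on first access, keeping `import transformers` fast.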
| 233 | 1 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        self.parent.assertGreater(result.loss, 0.0)
    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
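# Flow sketch: prepare_config_and_inputs() returns (config, pixel_values, labels)
# with pixel_values of shape (batch_size, num_channels, image_size, image_size),
# i.e. (13, 3, 64, 64) with the defaults above; the common tests consume the
# (config, inputs_dict) pair instead.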
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def UpperCAmelCase ( self ):
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def UpperCAmelCase ( self ):
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase ( self ):
pass
@slow
def UpperCAmelCase ( self ):
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = SegformerModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024").to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 175 |
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """
    Implementation of pigeonhole sort.
    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
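# Worked example: pigeon_sort([8, 3, 2, 7, 4]) spans holes 2..8 (holes_range=7);
# the counts are emptied in hole order, yielding [2, 3, 4, 7, 8]. Note the
# O(n + range) cost: the value range, not just n, drives both memory and time.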
if __name__ == "__main__":
import doctest
doctest.testmod()
a = input("Enter numbers separated by comma:\n")
a = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted)) | 175 | 1 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split dataset into features and target; data is features.
    """
    >>> data_handling(({'data':'[5.1, 3.5, 1.4, 0.2]','target':([0])}))
    ('[5.1, 3.5, 1.4, 0.2]', [0])
    """
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    """
    The URL for this algorithm: https://xgboost.readthedocs.io/en/stable/
    The Iris dataset is used to demonstrate the algorithm.
    """
    # Load Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25)

    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
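    # A minimal accuracy check to complement the confusion matrix (sketch; uses
    # sklearn's accuracy_score, which the imports above do not yet include):
    # from sklearn.metrics import accuracy_score
    # print(accuracy_score(y_test, xgboost_classifier.predict(x_test)))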
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 462 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
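# Fixture sketch: a 2->4 linear model, AdamW at lr=1.0, a OneCycleLR stepped
# twice per epoch for one epoch, and two single-batch dataloaders; get_signature()
# reduces the weights to a single scalar so the tests below can cheaply detect
# whether a checkpoint round-trip restored them.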
class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            accelerator = Accelerator(cpu=True)
    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)
    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
def __magic_name__ ( self ) -> Optional[int]:
__a : List[Any] = Accelerator()
__a , __a , __a , __a , __a : Any = create_components()
accelerator.prepare(_A , _A , _A , _A , _A )
__a : Any = get_signature(_A )
# saving hook
def save_config(_A , _A , _A ):
__a : Any = {'class_name': models[0].__class__.__name__}
with open(os.path.join(_A , 'data.json' ) , 'w' ) as f:
json.dump(_A , _A )
# loading hook
def load_config(_A , _A ):
with open(os.path.join(_A , 'data.json' ) , 'r' ) as f:
__a : Tuple = json.load(_A )
__a : Union[str, Any] = config['class_name']
__a : List[Any] = accelerator.register_save_state_pre_hook(_A )
__a : str = accelerator.register_load_state_pre_hook(_A )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_A )
# make sure random weights don't match with hooks
load_random_weights(_A )
self.assertTrue(abs(model_signature - get_signature(_A ) ) > 1E-3 )
# random class name to verify correct one is loaded
__a : Dict = 'random'
# make sure loaded weights match with hooks
accelerator.load_state(_A )
self.assertTrue(abs(model_signature - get_signature(_A ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(tmpdirname )
# make sure random weights don't match with hooks removed
load_random_weights(model )
self.assertTrue(abs(model_signature - get_signature(model ) ) > 1E-3 )
# random class name to verify correct one is loaded
model.class_name = 'random'
# make sure loaded weights match with hooks removed
accelerator.load_state(tmpdirname )
self.assertTrue(abs(model_signature - get_signature(model ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def __magic_name__ ( self ) -> Union[str, Any]:
accelerator = Accelerator()
model , optimizer , scheduler , train_dl , valid_dl = create_components()
dummy_obj = None
# This should work
model , optimizer , scheduler , train_dl , valid_dl , dummy_obj = accelerator.prepare(
model , optimizer , scheduler , train_dl , valid_dl , dummy_obj )
self.assertTrue(dummy_obj is None )
def __magic_name__ ( self ) -> Dict:
accelerator = Accelerator()
model , optimizer , scheduler , train_dl , valid_dl = create_components()
dummy_obj = [1, 2, 3]
# This should work
model , optimizer , scheduler , train_dl , valid_dl , dummy_obj = accelerator.prepare(
model , optimizer , scheduler , train_dl , valid_dl , dummy_obj )
self.assertEqual(
getattr(dummy_obj , '_is_accelerate_prepared' , False ) , False , 'Dummy object should not have `_is_accelerate_prepared` set' , )
self.assertEqual(
getattr(model , '_is_accelerate_prepared' , False ) , True , 'Model is missing `_is_accelerate_prepared` or is set to `False`' , )
self.assertEqual(
getattr(optimizer , '_is_accelerate_prepared' , False ) , True , 'Optimizer is missing `_is_accelerate_prepared` or is set to `False`' , )
self.assertEqual(
getattr(scheduler , '_is_accelerate_prepared' , False ) , True , 'Scheduler is missing `_is_accelerate_prepared` or is set to `False`' , )
self.assertEqual(
getattr(train_dl , '_is_accelerate_prepared' , False ) , True , 'Train Dataloader is missing `_is_accelerate_prepared` or is set to `False`' , )
self.assertEqual(
getattr(valid_dl , '_is_accelerate_prepared' , False ) , True , 'Valid Dataloader is missing `_is_accelerate_prepared` or is set to `False`' , )
@slow
@require_bnb
def __magic_name__ ( self ) -> Dict:
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_8bit=True , device_map={'': 0} , )
accelerator = Accelerator()
# This should work
model = accelerator.prepare(model )
@slow
@require_bnb
def __magic_name__ ( self ) -> Dict:
from transformers import AutoModelForCausalLM
accelerator = Accelerator()
with init_empty_weights():
model = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
model.tie_weights()
device_map = infer_auto_device_map(model )
device_map['lm_head'] = 'cpu'
model = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , device_map=device_map , load_in_8bit=True , llm_int8_enable_fp32_cpu_offload=True )
# This should not work and get value error
with self.assertRaises(ValueError ):
model = accelerator.prepare(model )
@slow
@require_bnb
@require_multi_gpu
def __magic_name__ ( self ) -> Any:
from transformers import AutoModelForCausalLM
PartialState._shared_state = {'distributed_type': DistributedType.MULTI_GPU}
with init_empty_weights():
model = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
model.tie_weights()
device_map = infer_auto_device_map(model )
device_map['lm_head'] = 1
model = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_8bit=True , device_map=device_map , )
accelerator = Accelerator()
# This should not work and get value error
with self.assertRaises(ValueError ):
model = accelerator.prepare(model )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def __magic_name__ ( self ) -> int:
from transformers import AutoModelForCausalLM
with init_empty_weights():
model = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
device_map = infer_auto_device_map(model )
device_map['lm_head'] = 1
model = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_8bit=True , device_map=device_map , )
accelerator = Accelerator()
# This should work
model = accelerator.prepare(model )
@require_cuda
def __magic_name__ ( self ) -> Dict:
model = torch.nn.Linear(10 , 10 )
optimizer = torch.optim.SGD(model.parameters() , lr=0.01 )
accelerator = Accelerator(cpu=True )
_ = accelerator.prepare(optimizer )
| 597 | 0 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
_DESCRIPTION = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
_KWARGS_DESCRIPTION = '\nCalculates how good predictions are, given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidate should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n    timeout:\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric("code_eval")\n    >>> test_cases = ["assert add(2,3)==5"]\n    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_WARNING = '\n################################################################################\n                                  !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
_LICENSE = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def _info( self ) -> Tuple:
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
def _compute( self , predictions , references , k=[1, 10, 100] , num_workers=4 , timeout=3.0 ):
if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('This metric is currently not supported on Windows.' )
with ThreadPoolExecutor(max_workers=num_workers ) as executor:
futures = []
completion_id = Counter()
n_samples = 0
results = defaultdict(list )
for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
for candidate in candidates:
test_program = candidate + '\n' + test_case
args = (test_program, timeout, task_id, completion_id[task_id])
future = executor.submit(check_correctness , *args )
futures.append(future )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(futures ):
result = future.result()
results[result["task_id"]].append((result['completion_id'], result) )
total, correct = [], []
for result in results.values():
result.sort()
passed = [r[1]['passed'] for r in result]
total.append(len(passed ) )
correct.append(sum(passed ) )
total = np.array(total )
correct = np.array(correct )
ks = k
pass_at_k = {f"""pass@{k}""": estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def estimate_pass_at_k( num_samples , num_correct , k ) -> np.ndarray:
"""Estimates pass@k of each problem and returns the values in an array."""
def estimator(n , c , k ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(num_samples , int ):
num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
else:
assert len(num_samples ) == len(num_correct )
num_samples_it = iter(num_samples )
return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
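# Hedged usage sketch (added for illustration; not part of the original metric):
# a pure-numpy sanity check of `estimate_pass_at_k`. With n=2 samples per problem,
# pass@1 is 1 - C(n-c, 1)/C(n, 1), i.e. 0.5 when 1 of 2 samples passes.
if __name__ == "__main__":
    demo_totals = [2, 2]   # samples drawn per problem
    demo_correct = [1, 2]  # samples that passed per problem
    print(estimate_pass_at_k(demo_totals , demo_correct , 1 ))  # -> [0.5 1. ]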
| 567 |
def sum_of_divisors(input_num: int ) -> int:
"""
Returns the sum of all proper divisors of a positive integer.
>>> sum_of_divisors(6)
6
>>> sum_of_divisors(12)
16
"""
if not isinstance(input_num , int ):
raise ValueError('Input must be an integer' )
if input_num <= 0:
raise ValueError('Input must be positive' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 567 | 1 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE ( ProcessorMixin ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""image_processor""", """tokenizer"""]
_SCREAMING_SNAKE_CASE = """AutoImageProcessor"""
_SCREAMING_SNAKE_CASE = """AutoTokenizer"""
def __init__( self : int , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : int=None , **UpperCamelCase__ : Dict ):
"""simple docstring"""
UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , a_ , )
UpperCamelCase = kwargs.pop('feature_extractor' )
UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(a_ , a_ )
UpperCamelCase = self.image_processor
UpperCamelCase = False
def __call__( self : Optional[int] , *UpperCamelCase__ : Any , **UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*a_ , **a_ )
UpperCamelCase = kwargs.pop('images' , a_ )
UpperCamelCase = kwargs.pop('text' , a_ )
if len(a_ ) > 0:
UpperCamelCase = args[0]
UpperCamelCase = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
UpperCamelCase = self.image_processor(a_ , *a_ , **a_ )
if text is not None:
UpperCamelCase = self.tokenizer(a_ , **a_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
UpperCamelCase = encodings['input_ids']
return inputs
def A ( self : List[Any] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*a_ , **a_ )
def A ( self : str , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : str ):
"""simple docstring"""
return self.tokenizer.decode(*a_ , **a_ )
@contextmanager
def A ( self : str ):
"""simple docstring"""
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
UpperCamelCase = True
UpperCamelCase = self.tokenizer
yield
UpperCamelCase = self.image_processor
UpperCamelCase = False
def token2json( self : Dict , tokens , is_inner_value=False , added_vocab=None ):
"""Convert a (generated) token sequence into an ordered JSON format."""
if added_vocab is None:
added_vocab = self.tokenizer.get_added_vocab()
output = {}
while tokens:
start_token = re.search(R'<s_(.*?)>' , tokens , re.IGNORECASE )
if start_token is None:
break
key = start_token.group(1 )
end_token = re.search(Rf"""</s_{key}>""" , tokens , re.IGNORECASE )
start_token = start_token.group()
if end_token is None:
tokens = tokens.replace(start_token , '' )
else:
end_token = end_token.group()
start_token_escaped = re.escape(start_token )
end_token_escaped = re.escape(end_token )
content = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , tokens , re.IGNORECASE )
if content is not None:
content = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
value = self.token2json(content , is_inner_value=True , added_vocab=added_vocab )
if value:
if len(value ) == 1:
value = value[0]
output[key] = value
else: # leaf nodes
output[key] = []
for leaf in content.split(R'<sep/>' ):
leaf = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
leaf = leaf[1:-2] # for categorical special tokens
output[key].append(leaf )
if len(output[key] ) == 1:
output[key] = output[key][0]
tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.token2json(tokens[6:] , is_inner_value=True , added_vocab=added_vocab )
if len(output ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def A ( self : Optional[Any] ):
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , a_ , )
return self.image_processor_class
@property
def A ( self : int ):
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , a_ , )
return self.image_processor
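# Hedged illustration (added; not in the original file): given the repaired
# `token2json` above, a Donut-style token sequence such as
#     "<s_menu><s_name>Latte</s_name><s_price>5</s_price></s_menu>"
# would parse to {"menu": {"name": "Latte", "price": "5"}}, while plain text
# containing no <s_*> tags falls through to {"text_sequence": <the raw tokens>}.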
| 430 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class __snake_case ( PretrainedConfig ):
SCREAMING_SNAKE_CASE__ = 'wavlm'
def __init__( self ,a_=32 ,a_=768 ,a_=12 ,a_=12 ,a_=3072 ,a_="gelu" ,a_=0.1 ,a_=0.1 ,a_=0.1 ,a_=0.0 ,a_=0.1 ,a_=0.1 ,a_=0.02 ,a_=1e-5 ,a_="group" ,a_="gelu" ,a_=(512, 512, 512, 512, 512, 512, 512) ,a_=(5, 2, 2, 2, 2, 2, 2) ,a_=(10, 3, 3, 3, 3, 2, 2) ,a_=False ,a_=128 ,a_=16 ,a_=320 ,a_=800 ,a_=False ,a_=True ,a_=0.05 ,a_=10 ,a_=2 ,a_=0.0 ,a_=10 ,a_=320 ,a_=2 ,a_=0.1 ,a_=100 ,a_=256 ,a_=256 ,a_=0.1 ,a_="mean" ,a_=False ,a_=False ,a_=256 ,a_=(512, 512, 512, 512, 1500) ,a_=(5, 3, 3, 1, 1) ,a_=(1, 2, 3, 1, 1) ,a_=512 ,a_=80 ,a_=0 ,a_=1 ,a_=2 ,a_=False ,a_=3 ,a_=2 ,a_=3 ,a_=None ,**a_ ,):
"""simple docstring"""
super().__init__(**a_ ,pad_token_id=a_ ,bos_token_id=a_ ,eos_token_id=a_ )
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = feat_extract_norm
lowerCAmelCase__ = feat_extract_activation
lowerCAmelCase__ = list(a_ )
lowerCAmelCase__ = list(a_ )
lowerCAmelCase__ = list(a_ )
lowerCAmelCase__ = conv_bias
lowerCAmelCase__ = num_buckets
lowerCAmelCase__ = max_bucket_distance
lowerCAmelCase__ = num_conv_pos_embeddings
lowerCAmelCase__ = num_conv_pos_embedding_groups
lowerCAmelCase__ = len(self.conv_dim )
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = hidden_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = feat_proj_dropout
lowerCAmelCase__ = final_dropout
lowerCAmelCase__ = layerdrop
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_ctc_classes
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = do_stable_layer_norm
lowerCAmelCase__ = use_weighted_layer_sum
lowerCAmelCase__ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ = apply_spec_augment
lowerCAmelCase__ = mask_time_prob
lowerCAmelCase__ = mask_time_length
lowerCAmelCase__ = mask_time_min_masks
lowerCAmelCase__ = mask_feature_prob
lowerCAmelCase__ = mask_feature_length
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ = num_codevectors_per_group
lowerCAmelCase__ = num_codevector_groups
lowerCAmelCase__ = contrastive_logits_temperature
lowerCAmelCase__ = num_negatives
lowerCAmelCase__ = codevector_dim
lowerCAmelCase__ = proj_codevector_dim
lowerCAmelCase__ = diversity_loss_weight
# ctc loss
lowerCAmelCase__ = ctc_loss_reduction
lowerCAmelCase__ = ctc_zero_infinity
# adapter
lowerCAmelCase__ = add_adapter
lowerCAmelCase__ = adapter_kernel_size
lowerCAmelCase__ = adapter_stride
lowerCAmelCase__ = num_adapter_layers
lowerCAmelCase__ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCAmelCase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCAmelCase__ = list(a_ )
lowerCAmelCase__ = list(a_ )
lowerCAmelCase__ = list(a_ )
lowerCAmelCase__ = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return functools.reduce(operator.mul ,self.conv_stride ,1 )
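# Hedged note (added): the property above multiplies the conv strides together,
# so with the default conv_stride=(5, 2, 2, 2, 2, 2, 2) it returns
# 5 * 2**6 = 320 -- i.e. the feature encoder emits one frame per ~320 input
# samples (~20 ms of audio at 16 kHz). A stand-alone check of the same reduction:
#
# import functools, operator
# assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320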
| 193 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
SPIECE_UNDERLINE = "▁"
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class snake_case__ ( PreTrainedTokenizerFast ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = """left"""
_SCREAMING_SNAKE_CASE = XLNetTokenizer
def __init__( self : List[str], _snake_case : Any=None, _snake_case : str=None, _snake_case : Union[str, Any]=False, _snake_case : Dict=True, _snake_case : int=False, _snake_case : Optional[int]="<s>", _snake_case : Union[str, Any]="</s>", _snake_case : List[str]="<unk>", _snake_case : List[Any]="<sep>", _snake_case : Optional[int]="<pad>", _snake_case : Optional[int]="<cls>", _snake_case : Optional[int]="<mask>", _snake_case : Union[str, Any]=["<eop>", "<eod>"], **_snake_case : Any, ) ->List[Any]:
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ : List[Any] = AddedToken(_snake_case, lstrip=_snake_case, rstrip=_snake_case ) if isinstance(_snake_case, _snake_case ) else mask_token
super().__init__(
vocab_file=_snake_case, tokenizer_file=_snake_case, do_lower_case=_snake_case, remove_space=_snake_case, keep_accents=_snake_case, bos_token=_snake_case, eos_token=_snake_case, unk_token=_snake_case, sep_token=_snake_case, pad_token=_snake_case, cls_token=_snake_case, mask_token=_snake_case, additional_special_tokens=_snake_case, **_snake_case, )
snake_case__ : List[str] = 3
snake_case__ : List[Any] = do_lower_case
snake_case__ : Any = remove_space
snake_case__ : Any = keep_accents
snake_case__ : List[Any] = vocab_file
snake_case__ : List[str] = False if not self.vocab_file else True
def lowercase_ ( self : Union[str, Any], token_ids_0 : List[int], token_ids_1 : Optional[List[int]] = None ) ->List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return token_ids_0 + sep + cls
return token_ids_0 + sep + token_ids_1 + sep + cls
def lowercase_ ( self : Any, token_ids_0 : List[int], token_ids_1 : Optional[List[int]] = None ) ->List[int]:
sep = [self.sep_token_id]
cls_segment_id = [2]
if token_ids_1 is None:
return len(token_ids_0 + sep ) * [0] + cls_segment_id
return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
def lowercase_ ( self : Dict, save_directory : str, filename_prefix : Optional[str] = None ) ->Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(save_directory ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
copyfile(self.vocab_file, out_vocab_file )
return (out_vocab_file,)
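# Hedged illustration (added): unlike BERT, XLNet appends its special tokens, so
# for a single sequence the two methods above produce
#     tokens:   A ... A  <sep>  <cls>
#     type ids: 0 ... 0    0      2
# and for a pair
#     tokens:   A ... A  <sep>  B ... B  <sep>  <cls>
#     type ids: 0 ... 0    0    1 ... 1    1      2
# where 2 is the `cls_segment_id` hard-coded above.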
| 243 |
import random
def rabin_miller (num : int ) -> bool:
s = num - 1
t = 0
while s % 2 == 0:
s = s // 2
t += 1
for _ in range(5 ):
a = random.randrange(2 , num - 1 )
v = pow(a , s , num )
if v != 1:
i = 0
while v != (num - 1):
if i == t - 1:
return False
else:
i = i + 1
v = (v**2) % num
return True
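# Hedged sanity check (added): the test is probabilistic with 5 random bases, so
# results are correct with high probability for odd inputs, e.g.
#     rabin_miller(97)  -> True  (97 is prime; Miller-Rabin never rejects primes)
#     rabin_miller(91)  -> False (91 = 7 * 13) with overwhelming probability
# Even inputs are screened out earlier by `is_prime_low_num` below.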
def is_prime_low_num (num : int ) -> bool:
if num < 2:
return False
low_primes = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(num )
def generate_large_prime (keysize : int = 1_0_2_4 ) -> int:
while True:
num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(num ):
return num
if __name__ == "__main__":
num = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
| 243 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class a__ :
'''simple docstring'''
A : Any = MBartConfig
A : Dict = {}
A : int = '''gelu'''
def __init__( self : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple=13 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Tuple=99 , lowerCAmelCase_ : List[str]=32 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Dict=4 , lowerCAmelCase_ : Dict=37 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[Any]=20 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : Any=0 , ) -> List[str]:
__A= parent
__A= batch_size
__A= seq_length
__A= is_training
__A= use_labels
__A= vocab_size
__A= hidden_size
__A= num_hidden_layers
__A= num_attention_heads
__A= intermediate_size
__A= hidden_dropout_prob
__A= attention_probs_dropout_prob
__A= max_position_embeddings
__A= eos_token_id
__A= pad_token_id
__A= bos_token_id
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
__A= ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__A= tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__A= tf.concat([input_ids, eos_tensor] , axis=1 )
__A= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A= self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__A= prepare_mbart_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return config, inputs_dict
def lowerCAmelCase ( self : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> int:
__A= TFMBartModel(config=lowerCAmelCase_ ).get_decoder()
__A= inputs_dict['input_ids']
__A= input_ids[:1, :]
__A= inputs_dict['attention_mask'][:1, :]
__A= inputs_dict['head_mask']
__A= 1
# first forward pass
__A= model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
__A, __A= outputs.to_tuple()
__A= past_key_values[1]
def prepare_mbart_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
"""simple docstring"""
if attention_mask is None:
attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
decoder_attention_mask = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
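# Hedged illustration (added): the helper above derives every mask it is not given.
# For pad_token_id=1, an input like tf.constant([[5, 6, 1]]) yields an
# attention_mask of [[1, 1, 0]], and the decoder mask always keeps position 0
# (the decoder start token) before masking the pads in the remainder.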
@require_tf
class a__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
A : str = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
A : str = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
A : Dict = (
{
'''conversational''': TFMBartForConditionalGeneration,
'''feature-extraction''': TFMBartModel,
'''summarization''': TFMBartForConditionalGeneration,
'''text2text-generation''': TFMBartForConditionalGeneration,
'''translation''': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
A : List[Any] = True
A : str = False
A : Tuple = False
def lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] ) -> Union[str, Any]:
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
__A= TFMBartModelTester(self )
__A= ConfigTester(self , config_class=lowerCAmelCase_ )
def lowerCAmelCase ( self : Dict ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : List[Any] ) -> Dict:
__A= self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class a__ ( unittest.TestCase ):
'''simple docstring'''
A : Dict = [
''' UN Chief Says There Is No Military Solution in Syria''',
]
A : Optional[int] = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
]
A : Any = '''facebook/mbart-large-en-ro'''
@cached_property
def lowerCAmelCase ( self : List[str] ) -> Tuple:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCAmelCase ( self : Optional[int] ) -> Any:
__A= TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
return model
def lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
__A= self.translate_src_text(**lowerCAmelCase_ )
self.assertListEqual(self.expected_text , lowerCAmelCase_ )
def lowerCAmelCase ( self : Tuple , **lowerCAmelCase_ : Dict ) -> List[str]:
__A= self.tokenizer(self.src_text , **lowerCAmelCase_ , return_tensors='tf' )
__A= self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
__A= self.tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
return generated_words
@slow
def lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
self._assert_generated_batch_equal_expected()
| 186 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class a__ :
'''simple docstring'''
def __init__( self : str , parent : str , ) -> Optional[int]:
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = False
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 2
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = 'gelu'
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def lowerCAmelCase ( self : Optional[Any] ) -> str:
__A= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A= None
if self.use_input_mask:
__A= random_attention_mask([self.batch_size, self.seq_length] )
__A= None
__A= None
__A= None
if self.use_labels:
__A= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A= ids_tensor([self.batch_size] , self.num_choices )
__A= DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] ) -> Any:
__A= TFDistilBertModel(config=lowerCAmelCase_ )
__A= {'input_ids': input_ids, 'attention_mask': input_mask}
__A= model(lowerCAmelCase_ )
__A= [input_ids, input_mask]
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple ) -> Optional[int]:
__A= TFDistilBertForMaskedLM(config=lowerCAmelCase_ )
__A= {'input_ids': input_ids, 'attention_mask': input_mask}
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] ) -> int:
__A= TFDistilBertForQuestionAnswering(config=lowerCAmelCase_ )
__A= {
'input_ids': input_ids,
'attention_mask': input_mask,
}
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] ) -> Union[str, Any]:
__A= self.num_labels
__A= TFDistilBertForSequenceClassification(lowerCAmelCase_ )
__A= {'input_ids': input_ids, 'attention_mask': input_mask}
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int ) -> Optional[Any]:
__A= self.num_choices
__A= TFDistilBertForMultipleChoice(lowerCAmelCase_ )
__A= tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
__A= tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
__A= {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
}
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> Dict:
__A= self.num_labels
__A= TFDistilBertForTokenClassification(lowerCAmelCase_ )
__A= {'input_ids': input_ids, 'attention_mask': input_mask}
__A= model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
config_and_inputs = self.prepare_config_and_inputs()
( config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
A : Optional[Any] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
A : Optional[int] = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A : str = False
A : List[Any] = False
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__A= TFDistilBertModelTester(self )
__A= ConfigTester(self , config_class=lowerCAmelCase_ , dim=37 )
def lowerCAmelCase ( self : Dict ) -> Tuple:
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase_ )
def lowerCAmelCase ( self : str ) -> Any:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase_ )
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase_ )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
__A= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self : int ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__A= TFDistilBertModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_tf
class a__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Any ) -> List[Any]:
__A= TFDistilBertModel.from_pretrained('distilbert-base-uncased' )
__A= tf.constant([[0, 1, 2, 3, 4, 5]] )
__A= model(lowerCAmelCase_ )[0]
__A= [1, 6, 768]
self.assertEqual(output.shape , lowerCAmelCase_ )
__A= tf.constant(
[
[
[0.19_26_18_85, -0.13_73_29_55, 0.4_11_97_99],
[0.22_15_01_56, -0.07_42_26_61, 0.39_03_72_04],
[0.22_75_60_18, -0.0_89_64_14, 0.3_70_14_67],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 )
| 186 | 1 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
args = parser.parse_args()
pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
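# Hedged usage sketch (added; the script name and all paths below are placeholders,
# not from the original file):
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --scheduler_type ddim --extract_ema \
#       --dump_path ./stable-diffusion-v1-5-diffusers
#
# With `--controlnet` only the ControlNet weights are saved; otherwise the full
# pipeline is written to `--dump_path`.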
| 373 |
import datasets
from .evaluate import evaluate
_CITATION = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
_DESCRIPTION = '''
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
_KWARGS_DESCRIPTION = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION)
class lowerCAmelCase_ ( datasets.Metric):
def _info( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
def _compute( self : Dict , predictions : List[Any] , references : Optional[int] ) ->str:
"""simple docstring"""
pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
dataset = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
score = evaluate(dataset=dataset , predictions=pred_dict )
return score
| 373 | 1 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class _SCREAMING_SNAKE_CASE ( ProcessorMixin ):
lowerCamelCase_ = ['image_processor', 'tokenizer']
lowerCamelCase_ = 'OwlViTImageProcessor'
lowerCamelCase_ = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : Dict , image_processor : List[Any]=None , tokenizer : Any=None , **kwargs : Dict ):
"""simple docstring"""
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , FutureWarning , )
feature_extractor = kwargs.pop('''feature_extractor''' )
image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(image_processor , tokenizer )
def __call__( self : Tuple , text : Dict=None , query_images : List[str]=None , images : List[Any]=None , padding : str="max_length" , return_tensors : Optional[int]="np" , **kwargs : int ):
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
'''You have to specify at least one text or query image or image. All three cannot be none.''' )
if text is not None:
if isinstance(snake_case_ , snake_case_ ) or (isinstance(snake_case_ , snake_case_ ) and not isinstance(text[0] , snake_case_ )):
A : List[str] = [self.tokenizer(snake_case_ , padding=snake_case_ , return_tensors=snake_case_ , **snake_case_ )]
elif isinstance(snake_case_ , snake_case_ ) and isinstance(text[0] , snake_case_ ):
A : Optional[Any] = []
# Maximum number of queries across batch
A : Optional[Any] = max([len(snake_case_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(snake_case_ ) != max_num_queries:
A : Tuple = t + [''' '''] * (max_num_queries - len(snake_case_ ))
A : List[Any] = self.tokenizer(snake_case_ , padding=snake_case_ , return_tensors=snake_case_ , **snake_case_ )
encodings.append(snake_case_ )
else:
raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
if return_tensors == "np":
A : Tuple = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
A : List[str] = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
A : Tuple = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
A : Dict = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
A : Union[str, Any] = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 )
A : Tuple = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
A : List[Any] = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
A : str = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
else:
raise ValueError('''Target return tensor type could not be returned''' )
A : Any = BatchEncoding()
A : Tuple = input_ids
A : Optional[Any] = attention_mask
if query_images is not None:
A : List[str] = BatchEncoding()
A : str = self.image_processor(
snake_case_ , return_tensors=snake_case_ , **snake_case_ ).pixel_values
A : Optional[int] = query_pixel_values
if images is not None:
A : Union[str, Any] = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if text is not None and images is not None:
A : Tuple = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
A : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case_ ) , tensor_type=snake_case_ )
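# Hedged illustration (added): for nested text queries, the branch above pads every
# sample to the longest query list before tokenizing, e.g.
#     [["a cat", "a dog"], ["a bird"]]  ->  [["a cat", "a dog"], ["a bird", " "]]
# so the stacked input_ids/attention_mask tensors share one common query dimension.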
def _UpperCAmelCase ( self : int , *snake_case_ : List[str] , **snake_case_ : str ):
"""simple docstring"""
return self.image_processor.post_process(*snake_case_ , **snake_case_ )
def _UpperCAmelCase ( self : Optional[int] , *snake_case_ : Any , **snake_case_ : Dict ):
"""simple docstring"""
return self.image_processor.post_process_object_detection(*snake_case_ , **snake_case_ )
def _UpperCAmelCase ( self : Dict , *snake_case_ : Union[str, Any] , **snake_case_ : int ):
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*snake_case_ , **snake_case_ )
def _UpperCAmelCase ( self : Optional[int] , *snake_case_ : int , **snake_case_ : List[str] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def _UpperCAmelCase ( self : int , *snake_case_ : Dict , **snake_case_ : List[str] ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def _UpperCAmelCase ( self : str ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , snake_case_ , )
return self.image_processor_class
@property
def _UpperCAmelCase ( self : int ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , snake_case_ , )
return self.image_processor | 256 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/pegasus-xsum": 5_12,
}
logger = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( PreTrainedTokenizer ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = ['input_ids', 'attention_mask']
    def __init__(self : Optional[Any] , vocab_file , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ):
        """simple docstring"""
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    f"""additional_special_tokens should be of type {type(list )}, but is"""
                    f""" {type(additional_special_tokens )}""" )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"""<unk_{i}>""" for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    '''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
                    f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , pad_token=pad_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
# add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
    def vocab_size (self : str ):
"""simple docstring"""
return len(self.sp_model ) + self.offset
    def get_vocab (self : str ):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
def __getstate__( self : Tuple ):
"""simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
return state
def __setstate__( self : str , snake_case_ : Tuple ):
"""simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def _tokenize (self : Tuple , text : str ):
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id (self : Tuple , token : str ):
        """simple docstring"""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token )
        return sp_id + self.offset
    def _convert_id_to_token (self : Optional[int] , index : int ):
        """simple docstring"""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset )
            return token
    def convert_tokens_to_string (self : Dict , tokens : List[str] ):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def num_special_tokens_to_add (self : str , pair : bool=False ):
        """simple docstring"""
        return 1
    def _special_token_mask (self : int , seq : List[int] ):
        """simple docstring"""
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask (self : int , token_ids_0 : List , token_ids_1 : Optional[List] = None , already_has_special_tokens : bool = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
    def build_inputs_with_special_tokens (self : List[Any] , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary (self : Tuple , save_directory : str , filename_prefix : Optional[str] = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 256 | 1 |
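# Hedged sketch of the id layout implemented by the tokenizer above: the first
# `offset` ids are reserved for pad/eos/mask/<unk_i> tokens, and every
# sentencepiece id is shifted up by `offset`. Plain dicts stand in for the
# real sp_model; the toy vocab is invented.
offset = 103
sp_piece_to_id = {"▁hello": 0, "▁world": 1}
sp_id_to_piece = {v: k for k, v in sp_piece_to_id.items()}
reserved = {0: "<pad>", 1: "</s>", 2: "<mask_2>", 3: "<mask_1>"}

def demo_token_to_id(token):
    return sp_piece_to_id[token] + offset

def demo_id_to_token(index):
    return reserved[index] if index in reserved else sp_id_to_piece[index - offset]

assert demo_token_to_id("▁hello") == 103
assert demo_id_to_token(0) == "<pad>" and demo_id_to_token(104) == "▁world"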
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments :
"""simple docstring"""
lowerCamelCase : Optional[str] =field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
lowerCamelCase : Optional[str] =field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
lowerCamelCase : Optional[str] =field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
lowerCamelCase : Optional[str] =field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
lowerCamelCase : Optional[int] =field(default=2 , metadata={"help": "Batch size for training."} )
lowerCamelCase : Optional[int] =field(default=2 , metadata={"help": "Batch size for evaluation."} )
lowerCamelCase : Optional[float] =field(default=0.1 , metadata={"help": "Value of weight decay."} )
lowerCamelCase : Optional[int] =field(
default=1_0000 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
    lowerCamelCase : Optional[float] =field(default=2e-4 , metadata={"help": "Learning rate for training."} )
    lowerCamelCase : Optional[str] =field(default="cosine" , metadata={"help": "Learning rate schedule type."} )
lowerCamelCase : Optional[int] =field(
default=750 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
lowerCamelCase : Optional[int] =field(
default=16 , metadata={"help": "Number of gradient accumulation steps."} )
lowerCamelCase : Optional[bool] =field(
default=a_ , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
lowerCamelCase : Optional[int] =field(default=5_0000 , metadata={"help": "Maximum number of training steps."} )
lowerCamelCase : Optional[int] =field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
lowerCamelCase : Optional[int] =field(default=1024 , metadata={"help": "Sequence lengths used for training."} )
lowerCamelCase : Optional[int] =field(default=1 , metadata={"help": "Training seed."} )
lowerCamelCase : Optional[int] =field(
default=1024 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
lowerCamelCase : Optional[str] =field(
default=a_ , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
lowerCamelCase : Optional[bool] =field(default=a_ , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class EvaluationArguments :
"""simple docstring"""
lowerCamelCase : Optional[str] =field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
lowerCamelCase : Optional[str] =field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
lowerCamelCase : Optional[int] =field(default=2 , metadata={"help": "Batch size used for evaluation."} )
lowerCamelCase : Optional[int] =field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
lowerCamelCase : Optional[int] =field(default=1024 , metadata={"help": "Length of sequences to be evaluated."} )
lowerCamelCase : Optional[int] =field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class HumanEvalArguments :
"""simple docstring"""
lowerCamelCase : Optional[str] =field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
lowerCamelCase : Optional[int] =field(default=a_ , metadata={"help": "Number of workers used for code evaluation."} )
lowerCamelCase : Optional[int] =field(
default=a_ , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
lowerCamelCase : Optional[bool] =field(
default=a_ , metadata={"help": "Sample from the language model's output distribution."} )
lowerCamelCase : Optional[float] =field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
lowerCamelCase : Optional[int] =field(default=256 , metadata={"help": "Maximum number of newly generated tokens."} )
lowerCamelCase : Optional[int] =field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
lowerCamelCase : Optional[float] =field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
lowerCamelCase : Optional[int] =field(default=10 , metadata={"help": "Number of generations to run in parallel."} )
lowerCamelCase : Optional[int] =field(
default=200 , metadata={"help": "Number of completions to generate for each sample."} )
lowerCamelCase : Optional[int] =field(default=1 , metadata={"help": "Random seed used for evaluation."} )
lowerCamelCase : Optional[str] =field(
default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} )
lowerCamelCase : Optional[str] =field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
lowerCamelCase : Optional[int] =field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class PreprocessingArguments :
"""simple docstring"""
lowerCamelCase : Optional[int] =field(
default=a_ , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
lowerCamelCase : Optional[str] =field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
lowerCamelCase : Optional[str] =field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
lowerCamelCase : Optional[int] =field(
default=10_0000 , metadata={"help": "Number of files to save per JSON output file."} )
lowerCamelCase : Optional[str] =field(default="content" , metadata={"help": "Column containing text data to process."} )
lowerCamelCase : Optional[float] =field(
default=1000 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
lowerCamelCase : Optional[float] =field(
default=100 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
lowerCamelCase : Optional[float] =field(
default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
lowerCamelCase : Optional[float] =field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
lowerCamelCase : Optional[float] =field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
lowerCamelCase : Optional[str] =field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
lowerCamelCase : Optional[bool] =field(
default=a_ , metadata={"help": "If True, near-duplicate samples are removed."} )
lowerCamelCase : Optional[float] =field(
default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class TokenizerTrainingArguments :
"""simple docstring"""
lowerCamelCase : Optional[str] =field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
lowerCamelCase : Optional[str] =field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
lowerCamelCase : Optional[str] =field(default="content" , metadata={"help": "Column containing text data to process."} )
lowerCamelCase : Optional[int] =field(default=20_0000 , metadata={"help": "Number of examples to train tokenizer on."} )
lowerCamelCase : Optional[int] =field(
        default=3_2768 , metadata={"help": "Vocabulary size of the new tokenizer."} )
lowerCamelCase : Optional[str] =field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
lowerCamelCase : Optional[bool] =field(default=a_ , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class PretokenizationArguments :
"""simple docstring"""
lowerCamelCase : Optional[str] =field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
lowerCamelCase : Optional[str] =field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
lowerCamelCase : Optional[str] =field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
lowerCamelCase : Optional[int] =field(default=a_ , metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class InitializationArguments :
"""simple docstring"""
lowerCamelCase : Optional[str] =field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
lowerCamelCase : Optional[str] =field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
lowerCamelCase : Optional[str] =field(default="codeparrot" , metadata={"help": "Name of the created model."} )
lowerCamelCase : Optional[bool] =field(default=a_ , metadata={"help": "Push saved tokenizer to the hub."} )
| 218 |
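# Hedged usage sketch: configuration dataclasses like the ones above are
# typically consumed through transformers.HfArgumentParser, which maps each
# field to a CLI flag. The tiny dataclass here is an invented stand-in, not
# the real one from the snippet.
from dataclasses import dataclass, field
from typing import Optional
from transformers import HfArgumentParser

@dataclass
class DemoArgs:
    model_ckpt: Optional[str] = field(default="codeparrot/codeparrot", metadata={"help": "Model name."})
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})

parser = HfArgumentParser(DemoArgs)
(demo_args,) = parser.parse_args_into_dataclasses(args=["--train_batch_size", "4"])
assert demo_args.train_batch_size == 4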
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__UpperCAmelCase = """platform"""
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict (config , input_ids , decoder_input_ids=None , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
if attention_mask is None:
__lowerCAmelCase : Optional[int] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
__lowerCAmelCase : Union[str, Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
__lowerCAmelCase : int = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__lowerCAmelCase : Optional[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__lowerCAmelCase : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotModelTester :
"""simple docstring"""
    def __init__( self : Optional[Any] , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=32 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.02 , ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
        config = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCAmelCase , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
return config, inputs_dict
    def prepare_config_and_inputs_for_common ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
    def check_use_cache_forward ( self : str , model_class_name , config , inputs_dict ) -> int:
"""simple docstring"""
__lowerCAmelCase : List[str] = 20
__lowerCAmelCase : Tuple = model_class_name(lowerCAmelCase )
__lowerCAmelCase : str = model.encode(inputs_dict["""input_ids"""] )
__lowerCAmelCase ,__lowerCAmelCase : Dict = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__lowerCAmelCase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
__lowerCAmelCase : Optional[int] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowerCAmelCase : Dict = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase , decoder_attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase , decoder_position_ids=lowerCAmelCase , )
__lowerCAmelCase : Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__lowerCAmelCase : Any = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase , decoder_attention_mask=lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase , )
__lowerCAmelCase : List[str] = model.decode(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask ( self : int , model_class_name , config , inputs_dict ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : List[Any] = 20
__lowerCAmelCase : Tuple = model_class_name(lowerCAmelCase )
__lowerCAmelCase : Tuple = model.encode(inputs_dict["""input_ids"""] )
__lowerCAmelCase ,__lowerCAmelCase : str = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__lowerCAmelCase : Tuple = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__lowerCAmelCase : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : List[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowerCAmelCase : List[str] = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase , decoder_attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase , decoder_position_ids=lowerCAmelCase , )
__lowerCAmelCase : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__lowerCAmelCase : Any = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase , decoder_position_ids=lowerCAmelCase , )
__lowerCAmelCase : Any = model.decode(lowerCAmelCase , lowerCAmelCase , decoder_attention_mask=lowerCAmelCase )
__lowerCAmelCase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[Any] =99
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Dict = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
__lowerCAmelCase : Dict = input_ids.shape[0]
__lowerCAmelCase : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : Tuple = self._get_config_and_data()
__lowerCAmelCase : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase )
__lowerCAmelCase : Any = lm_model(input_ids=lowerCAmelCase )
__lowerCAmelCase : List[str] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
__lowerCAmelCase : List[str] = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase )
__lowerCAmelCase : Dict = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
__lowerCAmelCase : str = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
__lowerCAmelCase : List[str] = lm_model(input_ids=lowerCAmelCase , decoder_input_ids=lowerCAmelCase )
__lowerCAmelCase : Tuple = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
__lowerCAmelCase : Tuple = shift_tokens_right(lowerCAmelCase , 1 , 2 )
__lowerCAmelCase : int = np.equal(lowerCAmelCase , 1 ).astype(np.floataa ).sum()
__lowerCAmelCase : List[Any] = np.equal(lowerCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowerCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotModelTest ( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    """simple docstring"""
    is_encoder_decoder =True
    all_model_classes =(
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes =(FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp ( self : int ) -> None:
        """simple docstring"""
        self.model_tester = FlaxBlenderbotModelTester(self )
    def test_use_cache_forward ( self : str ) -> Optional[int]:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def test_use_cache_forward_with_attn_mask ( self : Any ) -> int:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCAmelCase : Tuple = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : str = model_class(lowerCAmelCase )
@jax.jit
def encode_jitted(lowerCAmelCase : Optional[int] , lowerCAmelCase : Any=None , **lowerCAmelCase : Optional[Any] ):
return model.encode(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase )
with self.subTest("""JIT Enabled""" ):
__lowerCAmelCase : Optional[int] = encode_jitted(**lowerCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowerCAmelCase : Tuple = encode_jitted(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCAmelCase : List[Any] = model_class(lowerCAmelCase )
__lowerCAmelCase : Any = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
__lowerCAmelCase : Union[str, Any] = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : Optional[int] ):
return model.decode(
decoder_input_ids=lowerCAmelCase , decoder_attention_mask=lowerCAmelCase , encoder_outputs=lowerCAmelCase , )
with self.subTest("""JIT Enabled""" ):
__lowerCAmelCase : Union[str, Any] = decode_jitted(**lowerCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowerCAmelCase : Optional[Any] = decode_jitted(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowerCAmelCase : Optional[int] = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__lowerCAmelCase : Optional[int] = np.ones((1, 1) ) * model.config.eos_token_id
__lowerCAmelCase : Any = model(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""" )
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 15, """max_length""": 25}
__lowerCAmelCase : str = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
__lowerCAmelCase : Optional[int] = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=lowerCAmelCase )
__lowerCAmelCase : str = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""" )
__lowerCAmelCase : List[str] = ["""Sam"""]
__lowerCAmelCase : List[str] = tokenizer(lowerCAmelCase , return_tensors="""jax""" )
__lowerCAmelCase : Union[str, Any] = model.generate(**lowerCAmelCase , **lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = """Sam is a great name. It means \"sun\" in Gaelic."""
__lowerCAmelCase : List[Any] = tokenizer.batch_decode(lowerCAmelCase , **lowerCAmelCase )
assert generated_txt[0].strip() == tgt_text
| 218 | 1 |
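# Hedged numpy sketch of what a shift_tokens_right helper of this kind does:
# the decoder input is the target shifted one position right, with the
# decoder start token prepended. Details of the real Flax helper may differ.
import numpy as np

def shift_right(input_ids: np.ndarray, pad_token_id: int, start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = start_token_id
    # positions that held the loss-masking value map back to the pad token
    return np.where(shifted == -100, pad_token_id, shifted)

ids = np.array([[71, 82, 18, 2]])
assert shift_right(ids, pad_token_id=1, start_token_id=2).tolist() == [[2, 71, 82, 18]]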
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = '''▁'''
_snake_case = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_snake_case = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
_snake_case = {
'''google/pegasus-xsum''': 5_12,
}
class UpperCAmelCase_ ( PreTrainedTokenizerFast ):
'''simple docstring'''
__A : int = VOCAB_FILES_NAMES
__A : Any = PRETRAINED_VOCAB_FILES_MAP
__A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : int = PegasusTokenizer
__A : int = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , tokenizer_file=None , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , **kwargs , ):
        """simple docstring"""
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    F"""additional_special_tokens should be of type {type(list )}, but is"""
                    F""" {type(additional_special_tokens )}""" )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                F"""<unk_{i}>""" for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )]
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , pad_token=pad_token , eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask ( self , seq ):
        """simple docstring"""
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                F""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
    def build_inputs_with_special_tokens ( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary ( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
return (out_vocab_file,)
| 340 |
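# Hedged sketch of the special-token mask computed above: given the set of
# special ids, every position is flagged 1 (special) or 0 (regular token).
# The toy id values are invented.
all_special_ids = {0, 1, 2, 3}           # e.g. pad, eos, mask_2, mask_1
seq = [0, 105, 106, 3, 1]
mask = [1 if x in all_special_ids else 0 for x in seq]
assert mask == [1, 0, 0, 1, 1]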
import copy
import random
from transformers import CLIPTokenizer
class UpperCAmelCase_ ( CLIPTokenizer ):
'''simple docstring'''
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        self.token_map = {}
    def try_adding_tokens ( self , placeholder_token , *args , **kwargs ):
        """simple docstring"""
        num_added_tokens = super().add_tokens(placeholder_token , *args , **kwargs )
        if num_added_tokens == 0:
            raise ValueError(
                F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
                " `placeholder_token` that is not already in the tokenizer." )
    def add_placeholder_tokens ( self , placeholder_token , *args , num_vec_per_token=1 , **kwargs ):
        """simple docstring"""
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token , *args , **kwargs )
            output.append(placeholder_token )
        else:
            output = []
            for i in range(num_vec_per_token ):
                ith_token = placeholder_token + F"""_{i}"""
                self.try_adding_tokens(ith_token , *args , **kwargs )
                output.append(ith_token )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    F"""The tokenizer already has placeholder token {token} that can get confused with"""
                    F""" {placeholder_token}. Keep placeholder tokens independent.""" )
        self.token_map[placeholder_token] = output
    def replace_placeholder_tokens_in_text ( self , text , vector_shuffle=False , prop_tokens_to_load=1.0 ):
        """simple docstring"""
        if isinstance(text , list ):
            output = []
            for i in range(len(text ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=vector_shuffle ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens ) * prop_tokens_to_load )]
                if vector_shuffle:
                    tokens = copy.copy(tokens )
                    random.shuffle(tokens )
                text = text.replace(placeholder_token , " ".join(tokens ) )
        return text
    def __call__( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        """simple docstring"""
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
    def encode ( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        """simple docstring"""
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
| 340 | 1 |
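# Hedged miniature of the multi-vector placeholder trick above: one
# placeholder string maps to several sub-tokens, and occurrences in the
# prompt are expanded before ordinary tokenization. Toy data, not the real
# token map.
import random

token_map = {"<cat-toy>": ["<cat-toy>_0", "<cat-toy>_1", "<cat-toy>_2"]}

def expand(text: str, vector_shuffle: bool = False) -> str:
    for placeholder, tokens in token_map.items():
        if placeholder in text:
            tokens = list(tokens)
            if vector_shuffle:
                random.shuffle(tokens)
            text = text.replace(placeholder, " ".join(tokens))
    return text

assert expand("a photo of <cat-toy>") == "a photo of <cat-toy>_0 <cat-toy>_1 <cat-toy>_2"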
"""simple docstring"""
def solution() -> int:
    constant = []
    i = 1
    while len(constant ) < 1e6:
        constant.append(str(i ) )
        i += 1
    constant = "".join(constant )
    return (
        int(constant[0] )
        * int(constant[9] )
        * int(constant[99] )
        * int(constant[999] )
        * int(constant[9999] )
        * int(constant[99999] )
        * int(constant[999999] )
    )
if __name__ == "__main__":
print(solution())
| 709 |
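# Hedged alternative to building the million-character string above: the n-th
# digit (1-indexed) of the Champernowne constant can be located arithmetically
# by skipping whole blocks of k-digit numbers.
def champernowne_digit(n: int) -> int:
    k, count, start = 1, 9, 1
    while n > k * count:               # skip past the block of k-digit numbers
        n -= k * count
        k, count, start = k + 1, count * 10, start * 10
    number = start + (n - 1) // k      # the number the digit falls inside
    return int(str(number)[(n - 1) % k])

assert [champernowne_digit(10 ** i) for i in range(3)] == [1, 1, 5]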
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _SCREAMING_SNAKE_CASE ( ProcessorMixin ):
'''simple docstring'''
__UpperCAmelCase = ["""image_processor""", """tokenizer"""]
__UpperCAmelCase = """BridgeTowerImageProcessor"""
__UpperCAmelCase = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
    def __init__(self , image_processor , tokenizer ):
        '''simple docstring'''
        super().__init__(image_processor , tokenizer )
    def __call__(self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        '''simple docstring'''
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode (self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode (self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names (self ):
'''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 239 | 0 |
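# Hedged sketch of the name-merging idiom used above: dict.fromkeys gives an
# order-preserving union of the tokenizer and image-processor input names.
# The toy name lists are invented.
tokenizer_input_names = ["input_ids", "attention_mask"]
image_processor_input_names = ["pixel_values", "pixel_mask", "attention_mask"]
merged = list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
assert merged == ["input_ids", "attention_mask", "pixel_values", "pixel_mask"]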
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowerCamelCase__ : List[str] = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
lowerCamelCase__ : str = {"""facebook/blenderbot_small-90M""": 5_1_2}
def get_pairs(word ) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class __magic_name__ (PreTrainedTokenizer ):
'''simple docstring'''
__lowercase : Tuple = VOCAB_FILES_NAMES
__lowercase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Union[str, Any] = ['input_ids', 'attention_mask']
    def __init__( self:Dict , vocab_file , merges_file , bos_token="__start__" , eos_token="__end__" , unk_token="__unk__" , pad_token="__null__" , **kwargs , ):
        super().__init__(unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            merges = merges_handle.read().split('''\n''' )[1:-1]
        merges = [tuple(merge.split() ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
@property
    def vocab_size( self:Union[str, Any] ) -> int:
        return len(self.encoder )
    def get_vocab( self:List[Any] ) -> Dict:
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self:Any , token:str ) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub('''([.,!?()])''' , r''' \1''' , token )
        token = re.sub('''(\')''' , r''' \1 ''' , token )
        token = re.sub(r'''\s{2,}''' , ''' ''' , token )
        if "\n" in token:
            token = token.replace('''\n''' , ''' __newln__''' )
        tokens = token.split(''' ''' )
        words = []
        for token in tokens:
            if not len(token ):
                continue
            token = token.lower()
            word = tuple(token )
            word = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
            pairs = get_pairs(word )
            if not pairs:
                words.append(token )
                continue
            while True:
                bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word ):
                    try:
                        j = word.index(first , i )
                        new_word.extend(word[i:j] )
                        i = j
                    except ValueError:
                        new_word.extend(word[i:] )
                        break
                    if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                        new_word.append(first + second )
                        i += 2
                    else:
                        new_word.append(word[i] )
                        i += 1
                new_word = tuple(new_word )
                word = new_word
                if len(word ) == 1:
                    break
                else:
                    pairs = get_pairs(word )
            word = '''@@ '''.join(word )
            word = word[:-4]
            self.cache[token] = word
            words.append(word )
        return " ".join(words )
    def _tokenize( self:Union[str, Any] , text:str ) -> list:
        split_tokens = []
        words = re.findall(r'''\S+\n?''' , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(''' ''' ) ) )
        return split_tokens
    def _convert_token_to_id( self:Union[str, Any] , token:str ) -> int:
        token = token.lower()
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self:List[Any] , index:int ) -> str:
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self:str , tokens:list ) -> str:
        out_string = ''' '''.join(tokens ).replace('''@@ ''' , '''''' ).strip()
        return out_string
    def save_vocabulary( self:Union[str, Any] , save_directory:str , filename_prefix:Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
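# Hedged miniature of the BPE loop implemented above: repeatedly merge the
# adjacent pair with the best (lowest) merge rank. Toy merge table only, and
# a simplified left-to-right merge instead of the index-based scan.
def get_pairs_demo(word):
    return {(a, b) for a, b in zip(word, word[1:])}

def bpe_demo(token: str, bpe_ranks: dict) -> str:
    word = tuple(token)
    while True:
        pairs = get_pairs_demo(word)
        if not pairs:
            break
        bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                new_word.append(first + second)  # merge the matched pair
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return " ".join(word)

demo_ranks = {("l", "o"): 0, ("lo", "w"): 1}
assert bpe_demo("low", demo_ranks) == "low"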
| 33 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
SCREAMING_SNAKE_CASE__:Dict = logging.get_logger(__name__)
class snake_case__ ( DonutImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 528 | 0 |
'''simple docstring'''
def _lowerCamelCase ( a : int , b : int ) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
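# Hedged property check for the routine above: converting the result back to
# an int must agree with Python's built-in bitwise AND.
for x in range(16):
    for y in range(16):
        assert int(_lowerCamelCase(x, y), 2) == (x & y)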
| 389 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_git'''] = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 389 | 1 |
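# Hedged sketch of the lazy-import idea behind _LazyModule, using a PEP 562
# module-level __getattr__; a stdlib module stands in so the demo can run.
import importlib

_lazy_attrs = {"sqrt": "math"}  # attribute -> module that provides it

def __getattr__(name):
    if name in _lazy_attrs:
        # the providing module is only imported on first attribute access
        return getattr(importlib.import_module(_lazy_attrs[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")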
def lowerCamelCase__ ( number : int ) -> int:
    if number < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif not isinstance(number , int ):
        raise TypeError('''Input value must be a \'int\' type''' )
    return bin(number ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 592 |
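# Hedged alternative to bin(number).count("1") above: Brian Kernighan's trick
# clears the lowest set bit each iteration, so it loops once per set bit.
def popcount_kernighan(number: int) -> int:
    count = 0
    while number:
        number &= number - 1  # drop the lowest set bit
        count += 1
    return count

assert all(popcount_kernighan(n) == bin(n).count("1") for n in range(256))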
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester :
"""simple docstring"""
    def __init__( self : Union[str, Any] , parent , batch_size=13 , image_size=[30, 30] , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.0_2 , num_labels=3 , scope=None , n_targets=8 , num_detection_tokens=10 , ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
    def prepare_config_and_inputs ( self : Any ) -> tuple:
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size ):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels , size=(self.n_targets,) , device=torch_device )
                target["boxes"] = torch.rand(self.n_targets , 4 , device=torch_device )
                labels.append(target )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config ( self : List[str] ) -> YolosConfig:
        """simple docstring"""
        return YolosConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
    def create_and_check_model ( self : Any , config , pixel_values , labels ) -> None:
        """simple docstring"""
        model = YolosModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
    def create_and_check_for_object_detection ( self : Tuple , config , pixel_values , labels ) -> None:
        """simple docstring"""
        model = YolosForObjectDetection(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values=pixel_values )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
        result = model(pixel_values=pixel_values , labels=labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
    def prepare_config_and_inputs_for_common ( self : int ) -> tuple:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
snake_case = False
snake_case = False
snake_case = False
snake_case = False
    def _prepare_for_class ( self : int , inputs_dict , model_class , return_labels=False ) -> dict:
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size ):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,) , device=torch_device , dtype=torch.long )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets , 4 , device=torch_device , dtype=torch.float )
                    labels.append(target )
                inputs_dict["labels"] = labels
        return inputs_dict
    def setUp ( self : Tuple ) -> None:
        """simple docstring"""
        self.model_tester = YolosModelTester(self )
        self.config_tester = ConfigTester(self , config_class=YolosConfig , has_text_modality=False , hidden_size=37 )
def lowerCamelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
# YOLOS does not use inputs_embeds
pass
def lowerCamelCase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) )
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_snake_case )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _snake_case )
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
# in YOLOS, the seq_len is different
A_ = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
A_ = True
A_ = False
A_ = True
A_ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A_ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A_ = outputs.attentions
self.assertEqual(len(_snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A_ = True
A_ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A_ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A_ = outputs.attentions
self.assertEqual(len(_snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A_ = len(_snake_case )
# Check attention is always last and order is fine
A_ = True
A_ = True
A_ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A_ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A_ = 1
self.assertEqual(out_len + added_hidden_states , len(_snake_case ) )
A_ = outputs.attentions
self.assertEqual(len(_snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowerCamelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
def check_hidden_states_output(_snake_case : str , _snake_case : Dict , _snake_case : Dict ):
A_ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
A_ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
A_ = outputs.hidden_states
A_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_snake_case ) , _snake_case )
# YOLOS has a different seq_length
A_ = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def lowerCamelCase__ ( self : Dict ) -> Any:
"""simple docstring"""
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_snake_case )
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = YolosModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def A_ ():
'''simple docstring'''
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase__ ( self : str ) -> Any:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("hustvl/yolos-small" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
A_ = YolosForObjectDetection.from_pretrained("hustvl/yolos-small" ).to(_snake_case )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=_snake_case , return_tensors="pt" ).to(_snake_case )
# forward pass
with torch.no_grad():
A_ = model(inputs.pixel_values )
# verify outputs
A_ = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , _snake_case )
A_ = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=_snake_case , )
A_ = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _snake_case , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _snake_case , atol=1e-4 ) )
# verify postprocessing
A_ = image_processor.post_process_object_detection(
_snake_case , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
A_ = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(_snake_case )
A_ = [75, 75, 17, 63, 17]
A_ = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(_snake_case )
self.assertEqual(len(results["scores"] ) , 5 )
self.assertTrue(torch.allclose(results["scores"] , _snake_case , atol=1e-4 ) )
self.assertSequenceEqual(results["labels"].tolist() , _snake_case )
self.assertTrue(torch.allclose(results["boxes"][0, :] , _snake_case ) )
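# A minimal inference sketch of the flow the integration test above exercises.
# It assumes the public transformers API used in the test (AutoImageProcessor,
# YolosForObjectDetection) and an illustrative local image path.
import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

def detect_objects(image_path: str, threshold: float = 0.9) -> dict:
    image = Image.open(image_path)
    processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
    model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # Turn raw logits/boxes into thresholded detections in pixel coordinates.
    return processor.post_process_object_detection(
        outputs, threshold=threshold, target_sizes=[image.size[::-1]]
    )[0]  # dict with "scores", "labels", "boxes"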
| 115 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class SCREAMING_SNAKE_CASE_ ( __lowercase ):
__magic_name__: Tuple = '''visual_bert'''
def __init__( self : Optional[Any] , _A : Union[str, Any]=30522 , _A : Tuple=768 , _A : Dict=512 , _A : Dict=12 , _A : Any=12 , _A : Optional[Any]=3072 , _A : Tuple="gelu" , _A : List[Any]=0.1 , _A : Tuple=0.1 , _A : Optional[int]=512 , _A : str=2 , _A : List[Any]=0.0_2 , _A : List[Any]=1E-12 , _A : List[str]=False , _A : Dict=True , _A : Optional[int]=1 , _A : str=0 , _A : Dict=2 , **_A : Union[str, Any] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
snake_case_ : List[Any] = vocab_size
snake_case_ : Tuple = max_position_embeddings
snake_case_ : Optional[int] = hidden_size
snake_case_ : Optional[int] = visual_embedding_dim
snake_case_ : Any = num_hidden_layers
snake_case_ : Optional[int] = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Dict = hidden_act
snake_case_ : Dict = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : Dict = initializer_range
snake_case_ : Optional[Any] = type_vocab_size
snake_case_ : str = layer_norm_eps
snake_case_ : List[Any] = bypass_transformer
snake_case_ : int = special_visual_initialize
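# A hedged usage sketch: this class mirrors transformers' VisualBertConfig
# (the mangled class attribute above holds its model_type, "visual_bert").
# The keyword names are taken from the attribute assignments in __init__ and
# are assumed to match the upstream signature.
from transformers import VisualBertConfig

config = VisualBertConfig(hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12)
print(config.model_type)  # visual_bert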
| 720 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( snake_case_ , unittest.TestCase ):
__magic_name__: Dict = PriorTransformer
__magic_name__: str = "hidden_states"
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Any = 4
snake_case_ : int = 8
snake_case_ : Dict = 7
snake_case_ : Union[str, Any] = floats_tensor((batch_size, embedding_dim) ).to(_A )
snake_case_ : int = floats_tensor((batch_size, embedding_dim) ).to(_A )
snake_case_ : str = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_A )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def UpperCAmelCase_ ( self : List[Any] , _A : List[Any]=0 ) -> str:
"""simple docstring"""
torch.manual_seed(_A )
snake_case_ : List[Any] = 4
snake_case_ : str = 8
snake_case_ : Any = 7
snake_case_ : List[Any] = torch.randn((batch_size, embedding_dim) ).to(_A )
snake_case_ : Optional[int] = torch.randn((batch_size, embedding_dim) ).to(_A )
snake_case_ : List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_A )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
"""simple docstring"""
return (4, 8)
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
return (4, 8)
def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Union[str, Any] = {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
snake_case_ : Tuple = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
snake_case_ ,snake_case_ : str = PriorTransformer.from_pretrained(
'hf-internal-testing/prior-dummy' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(_A )
snake_case_ : Optional[Any] = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
snake_case_ ,snake_case_ : Optional[int] = self.prepare_init_args_and_inputs_for_common()
snake_case_ : Tuple = self.model_class(**_A )
snake_case_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : List[str] = [*signature.parameters.keys()]
snake_case_ : int = ['hidden_states', 'timestep']
self.assertListEqual(arg_names[:2] , _A )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
snake_case_ : str = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
snake_case_ : str = model.to(_A )
if hasattr(_A , 'set_default_attn_processor' ):
model.set_default_attn_processor()
snake_case_ : Optional[int] = self.get_dummy_seed_input()
with torch.no_grad():
snake_case_ : Any = model(**_A )[0]
snake_case_ : Any = output[0, :5].flatten().cpu()
print(_A )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
snake_case_ : str = torch.tensor([-1.3_4_3_6, -0.2_8_7_0, 0.7_5_3_8, 0.4_3_6_8, -0.0_2_3_9] )
self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )
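# A minimal sketch of the determinism check above: load the tiny test prior,
# run one seeded forward pass, and inspect a slice of the output. Model id and
# input shapes come straight from the test; the seed value is arbitrary.
import torch
from diffusers import PriorTransformer

model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
model.eval()
torch.manual_seed(0)
batch_size, embedding_dim, num_embeddings = 4, 8, 7
inputs = {
    "hidden_states": torch.randn(batch_size, embedding_dim),
    "timestep": 2,
    "proj_embedding": torch.randn(batch_size, embedding_dim),
    "encoder_hidden_states": torch.randn(batch_size, num_embeddings, embedding_dim),
}
with torch.no_grad():
    sample = model(**inputs)[0]
print(sample[0, :5].flatten())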
@slow
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def UpperCAmelCase_ ( self : Optional[Any] , _A : int=1 , _A : int=768 , _A : str=77 , _A : List[str]=0 ) -> Tuple:
"""simple docstring"""
torch.manual_seed(_A )
snake_case_ : Dict = batch_size
snake_case_ : Any = embedding_dim
snake_case_ : int = num_embeddings
snake_case_ : Dict = torch.randn((batch_size, embedding_dim) ).to(_A )
snake_case_ : List[Any] = torch.randn((batch_size, embedding_dim) ).to(_A )
snake_case_ : Optional[int] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_A )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_8_6_1, 0.1_2_8_3, -0.0_9_3_1, 0.0_8_8_2, 0.4_4_7_6, 0.1_3_2_9, -0.0_4_9_8, 0.0_6_4_0]],
[37, [-0.4_9_1_3, 0.0_1_1_0, -0.0_4_8_3, 0.0_5_4_1, 0.4_9_5_4, -0.0_1_7_0, 0.0_3_5_4, 0.1_6_5_1]],
# fmt: on
] )
def UpperCAmelCase_ ( self : Tuple , _A : List[Any] , _A : List[str] ) -> Optional[int]:
"""simple docstring"""
snake_case_ : str = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
model.to(_A )
snake_case_ : Optional[Any] = self.get_dummy_seed_input(seed=_A )
with torch.no_grad():
snake_case_ : str = model(**_A )[0]
assert list(sample.shape ) == [1, 768]
snake_case_ : Optional[Any] = sample[0, :8].flatten().cpu()
print(_A )
snake_case_ : int = torch.tensor(_A )
assert torch_all_close(_A , _A , atol=1E-3 )
| 534 | 0 |
from __future__ import annotations
import requests
def lowerCAmelCase_ ( __A ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
return requests.get(snake_case__ ).json()
def lowerCAmelCase_ ( __A = 10 ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
UpperCAmelCase__ = requests.get(snake_case__ ).json()[:max_stories]
return [get_hackernews_story(snake_case__ ) for story_id in story_ids]
def lowerCAmelCase_ ( __A = 10 ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ = hackernews_top_stories(snake_case__ )
return "\n".join("* [{title}]({url})".format(**snake_case__ ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 486 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def _A ( snake_case__ : List[str]="ro" , snake_case__ : int="en" , snake_case__ : Any="wmt16" , snake_case__ : Optional[Any]=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
snake_case__ : List[Any] = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
snake_case__ : Optional[Any] = datasets.load_dataset(snake_case__ , snake_case__ )
if save_dir is None:
snake_case__ : Optional[int] = f'''{dataset}-{pair}'''
snake_case__ : Optional[int] = Path(snake_case__ )
save_dir.mkdir(exist_ok=snake_case__ )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
snake_case__ : Optional[int] = '''val''' if split == '''validation''' else split
snake_case__ : Optional[Any] = save_dir.joinpath(f'''{fn}.source''' )
snake_case__ : Any = save_dir.joinpath(f'''{fn}.target''' )
snake_case__ : Union[str, Any] = src_path.open('''w+''' )
snake_case__ : str = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
snake_case__ : int = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
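# fire turns each keyword argument of the download function above into a CLI
# flag; a hypothetical invocation (the script file name is assumed):
#
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
#
# Each split is written as {split}.source / {split}.target under save_dir,
# with "validation" shortened to "val".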
| 261 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_lowerCamelCase = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
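# A simplified sketch of the lazy-import pattern _LazyModule implements: inside
# a package's __init__.py, attribute access triggers the real import (PEP 562
# module-level __getattr__), so importing the package itself stays cheap. This
# is an illustration, not the real transformers class.
import importlib

_ATTR_TO_MODULE = {"ASTConfig": "configuration_audio_spectrogram_transformer"}

def __getattr__(name):
    if name in _ATTR_TO_MODULE:
        submodule = importlib.import_module("." + _ATTR_TO_MODULE[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")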
| 613 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def __UpperCAmelCase( lowercase_ ):
return EnvironmentCommand()
def __UpperCAmelCase( lowercase_ ):
return EnvironmentCommand(args.accelerate_config_file )
class __A ( lowerCamelCase__ ):
"""simple docstring"""
@staticmethod
def __snake_case ( a__):
"""simple docstring"""
_lowerCamelCase : List[Any] = parser.add_parser('''env''')
download_parser.set_defaults(func=a__)
download_parser.add_argument(
'''--accelerate-config_file''' , default=a__ , help='''The accelerate config file to use for the default values in the launching script.''' , )
download_parser.set_defaults(func=a__)
def __init__( self , a__ , *a__):
"""simple docstring"""
_lowerCamelCase : str = accelerate_config_file
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = '''not installed'''
if is_safetensors_available():
import safetensors
_lowerCamelCase : Optional[Any] = safetensors.__version__
elif importlib.util.find_spec('''safetensors''') is not None:
import safetensors
_lowerCamelCase : Optional[int] = F"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
_lowerCamelCase : Union[str, Any] = '''not installed'''
_lowerCamelCase : Any = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
_lowerCamelCase : Optional[int] = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(a__):
_lowerCamelCase : Optional[int] = load_config_from_file(self._accelerate_config_file).to_dict()
_lowerCamelCase : str = (
'''\n'''.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()])
if isinstance(a__ , a__)
else F"""\t{accelerate_config}"""
)
_lowerCamelCase : List[Any] = '''not installed'''
_lowerCamelCase : Tuple = '''NA'''
if is_torch_available():
import torch
_lowerCamelCase : int = torch.__version__
_lowerCamelCase : List[str] = torch.cuda.is_available()
_lowerCamelCase : str = '''not installed'''
_lowerCamelCase : Union[str, Any] = '''NA'''
if is_tf_available():
import tensorflow as tf
_lowerCamelCase : List[str] = tf.__version__
try:
# deprecated in v2.1
_lowerCamelCase : Optional[int] = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_lowerCamelCase : Optional[int] = bool(tf.config.list_physical_devices('''GPU'''))
_lowerCamelCase : str = '''not installed'''
_lowerCamelCase : List[Any] = '''not installed'''
_lowerCamelCase : List[Any] = '''not installed'''
_lowerCamelCase : Optional[int] = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
_lowerCamelCase : Any = flax.__version__
_lowerCamelCase : str = jax.__version__
_lowerCamelCase : Any = jaxlib.__version__
_lowerCamelCase : int = jax.lib.xla_bridge.get_backend().platform
_lowerCamelCase : int = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': F"""{safetensors_version}""",
'''Accelerate version''': F"""{accelerate_version}""",
'''Accelerate config''': F"""{accelerate_config_str}""",
'''PyTorch version (GPU?)''': F"""{pt_version} ({pt_cuda_available})""",
'''Tensorflow version (GPU?)''': F"""{tf_version} ({tf_cuda_available})""",
'''Flax version (CPU?/GPU?/TPU?)''': F"""{flax_version} ({jax_backend})""",
'''Jax version''': F"""{jax_version}""",
'''JaxLib version''': F"""{jaxlib_version}""",
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''')
print(self.format_dict(a__))
return info
@staticmethod
def __snake_case ( a__):
"""simple docstring"""
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
| 613 | 1 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
__A = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _A ( UpperCamelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *__SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : int=None , **__SCREAMING_SNAKE_CASE : int ) -> int:
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
__UpperCAmelCase =eval_examples
__UpperCAmelCase =post_process_function
__UpperCAmelCase =quant_trainer_args
__UpperCAmelCase =128 # default number of calibration samples
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[str]=None ) -> str:
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("""Trainer: calibration requires an calib_dataset.""" )
__UpperCAmelCase =calib_dataset if calib_dataset is not None else self.calib_dataset
__UpperCAmelCase =self._remove_unused_columns(__UpperCAmelCase , description="""Calibration""" )
return DataLoader(
__UpperCAmelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__UpperCAmelCase , )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : Tuple=None ) -> Dict:
__UpperCAmelCase =self.train_dataset if calib_dataset is None else calib_dataset
__UpperCAmelCase =self.get_calib_dataloader(__UpperCAmelCase )
__UpperCAmelCase =self.model
quant_trainer.configure_model(__UpperCAmelCase , self.quant_trainer_args , calib=__UpperCAmelCase )
model.eval()
quant_trainer.enable_calibration(__UpperCAmelCase )
logger.info("""***** Running calibration *****""" )
logger.info(f''' Num examples = {self.calib_num}''' )
logger.info(f''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(__UpperCAmelCase ):
# Prediction step
__UpperCAmelCase =self.prediction_step(__UpperCAmelCase , __UpperCAmelCase , prediction_loss_only=__UpperCAmelCase )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__UpperCAmelCase , self.quant_trainer_args )
__UpperCAmelCase =model
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Optional[int] = "eval" ) -> Dict:
__UpperCAmelCase =self.eval_dataset if eval_dataset is None else eval_dataset
__UpperCAmelCase =self.get_eval_dataloader(__UpperCAmelCase )
__UpperCAmelCase =self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__UpperCAmelCase =self.compute_metrics
__UpperCAmelCase =None
__UpperCAmelCase =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__UpperCAmelCase =eval_loop(
__UpperCAmelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__UpperCAmelCase , )
finally:
__UpperCAmelCase =compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
__UpperCAmelCase =self.post_process_function(__UpperCAmelCase , __UpperCAmelCase , output.predictions )
__UpperCAmelCase =self.compute_metrics(__UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
__UpperCAmelCase =metrics.pop(__UpperCAmelCase )
self.log(__UpperCAmelCase )
else:
__UpperCAmelCase ={}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__UpperCAmelCase =self.callback_handler.on_evaluate(self.args , self.state , self.control , __UpperCAmelCase )
return metrics
def _a ( self : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Dict = "test" ) -> Union[str, Any]:
__UpperCAmelCase =self.get_test_dataloader(__UpperCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
__UpperCAmelCase =self.compute_metrics
__UpperCAmelCase =None
__UpperCAmelCase =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__UpperCAmelCase =eval_loop(
__UpperCAmelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__UpperCAmelCase , )
finally:
__UpperCAmelCase =compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
__UpperCAmelCase =self.post_process_function(__UpperCAmelCase , __UpperCAmelCase , output.predictions , """predict""" )
__UpperCAmelCase =self.compute_metrics(__UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
__UpperCAmelCase =metrics.pop(__UpperCAmelCase )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__UpperCAmelCase )
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any]="./" ) -> List[Any]:
__UpperCAmelCase =self.eval_dataset
__UpperCAmelCase =self.get_eval_dataloader(__UpperCAmelCase )
__UpperCAmelCase =next(iter(__UpperCAmelCase ) )
# saving device - to make it consistent
__UpperCAmelCase =torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
# convert to tuple
__UpperCAmelCase =tuple(v.to(__UpperCAmelCase ) for k, v in batch.items() )
logger.info("""Converting model to be onnx compatible""" )
from pytorch_quantization.nn import TensorQuantizer
__UpperCAmelCase =True
__UpperCAmelCase =self.model.to(__UpperCAmelCase )
model.eval()
model.float()
__UpperCAmelCase =model.module if hasattr(__UpperCAmelCase , """module""" ) else model
quant_trainer.configure_model(__UpperCAmelCase , self.quant_trainer_args )
__UpperCAmelCase =os.path.join(__UpperCAmelCase , """model.onnx""" )
logger.info(f'''exporting model to {output_model_file}''' )
__UpperCAmelCase ={0: 'batch_size', 1: 'seq_len'}
torch.onnx.export(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , export_params=__UpperCAmelCase , opset_version=13 , do_constant_folding=__UpperCAmelCase , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=__UpperCAmelCase , )
logger.info("""onnx export finished""" )
| 68 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__A = """sshleifer/bart-tiny-random"""
__A = """patrickvonplaten/t5-tiny-random"""
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self ):
'''simple docstring'''
return AutoConfig.from_pretrained(__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , *lowerCAmelCase__ :List[str] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , *lowerCAmelCase__ :Optional[int] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , *lowerCAmelCase__ :List[Any] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=__UpperCAmelCase )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , *lowerCAmelCase__ :Optional[int] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def snake_case ( self ):
'''simple docstring'''
with self.assertRaises(__UpperCAmelCase ):
create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=__UpperCAmelCase , d=__UpperCAmelCase )
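# A minimal usage sketch of the helper under test, assuming the make_student
# module from transformers' seq2seq distillation examples is importable:
import tempfile
from make_student import create_student_by_copying_alternating_layers

# Copy alternating layers of a tiny BART teacher into a 1+1-layer student.
student, *_ = create_student_by_copying_alternating_layers(
    "sshleifer/bart-tiny-random", tempfile.mkdtemp(), e=1, d=1
)
print(student.config.encoder_layers, student.config.decoder_layers)  # 1 1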
| 93 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __lowerCamelCase (UpperCAmelCase__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = filter(lambda UpperCAmelCase__ : p.requires_grad , model.parameters() )
SCREAMING_SNAKE_CASE = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_lowerCamelCase : Optional[int] = logging.getLogger(__name__)
def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] ):
if metric == "rouge2":
SCREAMING_SNAKE_CASE = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
SCREAMING_SNAKE_CASE = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
SCREAMING_SNAKE_CASE = "{val_avg_em:.4f}-{step_count}"
elif metric == "loss":
SCREAMING_SNAKE_CASE = "{val_avg_loss:.4f}-{step_count}"
else:
raise NotImplementedError(
F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
" function." )
SCREAMING_SNAKE_CASE = ModelCheckpoint(
dirpath=UpperCAmelCase__ , filename=UpperCAmelCase__ , monitor=F"val_{metric}" , mode="max" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def __lowerCamelCase (UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str ):
return EarlyStopping(
monitor=F"val_{metric}" , mode="min" if "loss" in metric else "max" , patience=UpperCAmelCase__ , verbose=UpperCAmelCase__ , )
class lowercase ( pl.Callback ):
def __snake_case( self : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {F"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(__UpperCamelCase )
@rank_zero_only
def __snake_case( self : Union[str, Any] , _UpperCamelCase : pl.Trainer , _UpperCamelCase : pl.LightningModule , _UpperCamelCase : str , _UpperCamelCase : List[str]=True ) -> Any:
'''simple docstring'''
logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****" )
SCREAMING_SNAKE_CASE = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
SCREAMING_SNAKE_CASE = Path(pl_module.hparams.output_dir )
if type_path == "test":
SCREAMING_SNAKE_CASE = od / "test_results.txt"
SCREAMING_SNAKE_CASE = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
SCREAMING_SNAKE_CASE = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
SCREAMING_SNAKE_CASE = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=__UpperCamelCase )
generations_file.parent.mkdir(exist_ok=__UpperCamelCase )
with open(__UpperCamelCase , "a+" ) as writer:
for key in sorted(__UpperCamelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
SCREAMING_SNAKE_CASE = metrics[key]
if isinstance(__UpperCamelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE = val.item()
SCREAMING_SNAKE_CASE = F"{key}: {val:.6f}\n"
writer.write(__UpperCamelCase )
if not save_generations:
return
if "preds" in metrics:
SCREAMING_SNAKE_CASE = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(__UpperCamelCase )
@rank_zero_only
def __snake_case( self : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] ) -> Tuple:
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE = pl_module.model.model.num_parameters()
except AttributeError:
SCREAMING_SNAKE_CASE = pl_module.model.num_parameters()
SCREAMING_SNAKE_CASE = count_trainable_parameters(__UpperCamelCase )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def __snake_case( self : str , _UpperCamelCase : pl.Trainer , _UpperCamelCase : pl.LightningModule ) -> Tuple:
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(__UpperCamelCase , __UpperCamelCase , "test" )
@rank_zero_only
def __snake_case( self : List[str] , _UpperCamelCase : pl.Trainer , _UpperCamelCase : int ) -> Any:
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
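# A self-contained sketch of the wiring the helpers above produce. The three
# mangled functions collide on one identifier, so the calls are restated here
# with the literal arguments used for the "rouge2" metric:
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint

checkpoint = ModelCheckpoint(
    dirpath="output", filename="{val_avg_rouge2:.4f}-{step_count}",
    monitor="val_rouge2", mode="max", save_top_k=1, every_n_epochs=1,
)
early_stopping = EarlyStopping(monitor="val_rouge2", mode="max", patience=3, verbose=True)
# trainer = pl.Trainer(callbacks=[checkpoint, early_stopping, <the logging callback above>])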
| 715 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : Dict=7 , _UpperCamelCase : Union[str, Any]=3 , _UpperCamelCase : Optional[int]=30 , _UpperCamelCase : List[Any]=400 , _UpperCamelCase : Dict=True , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Any=True , _UpperCamelCase : List[Any]=[0.5, 0.5, 0.5] , _UpperCamelCase : Tuple=[0.5, 0.5, 0.5] , _UpperCamelCase : Tuple=True , _UpperCamelCase : List[Any]=1 / 255 , _UpperCamelCase : Optional[Any]=True , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean
SCREAMING_SNAKE_CASE = image_std
SCREAMING_SNAKE_CASE = do_rescale
SCREAMING_SNAKE_CASE = rescale_factor
SCREAMING_SNAKE_CASE = do_pad
def __snake_case( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __snake_case( self : Any , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any]=False ) -> List[Any]:
'''simple docstring'''
if not batched:
SCREAMING_SNAKE_CASE = image_inputs[0]
if isinstance(_UpperCamelCase , Image.Image ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image.size
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE = int(self.size["shortest_edge"] * h / w )
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
elif w > h:
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE = int(self.size["shortest_edge"] * w / h )
else:
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
else:
SCREAMING_SNAKE_CASE = []
for image in image_inputs:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[0] )[0]
SCREAMING_SNAKE_CASE = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( a , unittest.TestCase ):
lowercase__ : Optional[int] = DetaImageProcessor if is_vision_available() else None
def __snake_case( self : List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = DetaImageProcessingTester(self )
@property
def __snake_case( self : int ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_pad" ) )
self.assertTrue(hasattr(_UpperCamelCase , "size" ) )
def __snake_case( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , _UpperCamelCase )
def __snake_case( self : str ) -> List[Any]:
'''simple docstring'''
pass
def __snake_case( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __snake_case( self : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __snake_case( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __snake_case( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE = json.loads(f.read() )
SCREAMING_SNAKE_CASE = {"image_id": 39_769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE = DetaImageProcessor()
SCREAMING_SNAKE_CASE = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCamelCase ) )
# verify orig_size
SCREAMING_SNAKE_CASE = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCamelCase ) )
@slow
def __snake_case( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE = json.loads(f.read() )
SCREAMING_SNAKE_CASE = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
SCREAMING_SNAKE_CASE = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
SCREAMING_SNAKE_CASE = DetaImageProcessor(format="coco_panoptic" )
SCREAMING_SNAKE_CASE = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , masks_path=_UpperCamelCase , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCamelCase , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCamelCase ) )
# verify boxes
SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCamelCase , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCamelCase ) )
# verify is_crowd
SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCamelCase ) )
# verify class_labels
SCREAMING_SNAKE_CASE = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCamelCase ) )
# verify masks
SCREAMING_SNAKE_CASE = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , _UpperCamelCase )
# verify orig_size
SCREAMING_SNAKE_CASE = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCamelCase ) )
# verify size
SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCamelCase ) )
| 647 | 0 |
_lowerCAmelCase: Tuple = 'Alexander Joslin'
import operator as op
from .stack import Stack
def _lowercase( __a : str ):
a__ ={'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
a__ =Stack()
a__ =Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__a ) )
elif i in operators:
# RULE 2
operator_stack.push(__a )
elif i == ")":
# RULE 4
a__ =operator_stack.peek()
operator_stack.pop()
a__ =operand_stack.peek()
operand_stack.pop()
a__ =operand_stack.peek()
operand_stack.pop()
a__ =operators[opr](__a , __a )
operand_stack.push(__a )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
_lowerCAmelCase: Dict = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 20 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 181 | 0 |
"""simple docstring"""
import numpy as np
def _UpperCamelCase ( _A ) -> np.ndarray:
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
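# A small usage example (the mangled name above stands in for `sigmoid`);
# doctest.testmod() below finds nothing to run because the docstring contains
# no examples, so the expected behavior is shown here instead:
#
#   >>> sigmoid(np.array([-1.0, 0.0, 1.0]))
#   array([0.26894142, 0.5       , 0.73105858])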
if __name__ == "__main__":
import doctest
doctest.testmod()
| 19 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
a : List[str] = logging.getLogger(__name__)
a : int = {'''facebook/bart-base''': BartForConditionalGeneration}
a : Dict = {'''facebook/bart-base''': BartTokenizer}
def _UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" , type=_A , default=_A , help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" , type=_A , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=_A , default=_A , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=_A , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_A , )
parser.add_argument(
"""--config_name""" , type=_A , default=_A , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=_A , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=_A , default=_A , help="""Where to store the final ONNX file.""" )
_UpperCAmelCase = parser.parse_args()
return args
def _UpperCamelCase ( _A , _A="cpu" ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = model_dict[model_name].from_pretrained(_A ).to(_A )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_A )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> Optional[int]:
"""simple docstring"""
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_A ) )
with torch.no_grad():
_UpperCAmelCase = """My friends are cool but they eat too many carbs."""
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors="""pt""" ).to(model.device )
_UpperCAmelCase = model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=_A , max_length=_A , early_stopping=_A , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_A , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _A , opset_version=1_4 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=_A , )
logger.info("""Model exported to {}""".format(_A ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_A ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_A ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_A )
_UpperCAmelCase = ort_sess.run(
_A , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_A ),
"""max_length""": np.array(_A ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase ,_UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _A )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_A )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_A , _A , _A , _A , _A )
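# A hypothetical invocation of this export script (the file name is assumed;
# the flags come from parse_args above, with --model_name_or_path required):
#
#   python run_bart_onnx_export.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 \
#       --device cpu --output_file_path BART.onnx
#
# main() falls back to num_beams=4, max_length=5 and BART.onnx when the
# optional flags are omitted.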
if __name__ == "__main__":
main()
| 19 | 1 |
import torch
from torch import nn
class lowerCamelCase_ ( nn.Module ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=1 , __lowerCAmelCase=False ):
"""simple docstring"""
super().__init__()
__magic_name__ :Union[str, Any] = n_token
__magic_name__ :Union[str, Any] = d_embed
__magic_name__ :int = d_proj
__magic_name__ :List[Any] = cutoffs + [n_token]
__magic_name__ :str = [0] + self.cutoffs
__magic_name__ :int = div_val
__magic_name__ :Any = self.cutoffs[0]
__magic_name__ :Optional[int] = len(self.cutoffs ) - 1
__magic_name__ :Union[str, Any] = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
__magic_name__ :str = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
__magic_name__ :Tuple = nn.Parameter(torch.zeros(self.n_clusters ) )
__magic_name__ :Union[str, Any] = nn.ModuleList()
__magic_name__ :Any = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCAmelCase , __lowerCAmelCase ) ) )
else:
self.out_projs.append(__lowerCAmelCase )
self.out_layers.append(nn.Linear(__lowerCAmelCase , __lowerCAmelCase ) )
else:
for i in range(len(self.cutoffs ) ):
__magic_name__ , __magic_name__ :str = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__magic_name__ :Any = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCAmelCase , __lowerCAmelCase ) ) )
self.out_layers.append(nn.Linear(__lowerCAmelCase , r_idx - l_idx ) )
__magic_name__ :List[str] = keep_order
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if proj is None:
__magic_name__ :Any = nn.functional.linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
__magic_name__ :Any = nn.functional.linear(__lowerCAmelCase , proj.t().contiguous() )
__magic_name__ :str = nn.functional.linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def A ( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=False ):
"""simple docstring"""
if labels is not None:
# Shift so that tokens < n predict n
__magic_name__ :List[str] = hidden[..., :-1, :].contiguous()
__magic_name__ :Dict = labels[..., 1:].contiguous()
__magic_name__ :Any = hidden.view(-1 , hidden.size(-1 ) )
__magic_name__ :Optional[int] = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
__magic_name__ :Union[str, Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
__magic_name__ :int = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
__magic_name__ :Optional[Any] = labels != -1_0_0
__magic_name__ :int = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
__magic_name__ :Dict = (
-nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
__magic_name__ :str = nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
__magic_name__ , __magic_name__ :List[Any] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__magic_name__ , __magic_name__ :List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__magic_name__ :Optional[Any] = self.out_layers[0].weight[l_idx:r_idx]
__magic_name__ :Optional[int] = self.out_layers[0].bias[l_idx:r_idx]
else:
__magic_name__ :List[str] = self.out_layers[i].weight
__magic_name__ :Union[str, Any] = self.out_layers[i].bias
if i == 0:
__magic_name__ :List[str] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
__magic_name__ :int = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
__magic_name__ , __magic_name__ , __magic_name__ :int = weights[0], biases[0], self.out_projs[0]
__magic_name__ :List[Any] = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
if labels is None:
__magic_name__ :str = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
__magic_name__ :int = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
__magic_name__ :Tuple = 0
__magic_name__ :Optional[Any] = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
__magic_name__ , __magic_name__ :str = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
__magic_name__ :Tuple = (labels >= l_idx) & (labels < r_idx)
__magic_name__ :Optional[Any] = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
__magic_name__ :Union[str, Any] = labels.index_select(0 , __lowerCAmelCase ) - l_idx
__magic_name__ :Tuple = head_logprob.index_select(0 , __lowerCAmelCase )
__magic_name__ :List[Any] = hidden.index_select(0 , __lowerCAmelCase )
else:
__magic_name__ :Any = hidden
if i == 0:
if labels is not None:
__magic_name__ :Optional[Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
__magic_name__ :List[Any] = head_logprob[:, : self.cutoffs[0]]
else:
__magic_name__ , __magic_name__ , __magic_name__ :Optional[int] = weights[i], biases[i], self.out_projs[i]
__magic_name__ :Union[str, Any] = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Union[str, Any] = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
__magic_name__ :Dict = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
__magic_name__ :Union[str, Any] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
__magic_name__ :Union[str, Any] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
__magic_name__ :int = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , __lowerCAmelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
if self.n_clusters == 0:
__magic_name__ :Optional[Any] = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
__magic_name__ , __magic_name__ :List[str] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__magic_name__ , __magic_name__ :Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__magic_name__ :Optional[Any] = self.out_layers[0].weight[l_idx:r_idx]
__magic_name__ :str = self.out_layers[0].bias[l_idx:r_idx]
else:
__magic_name__ :Optional[int] = self.out_layers[i].weight
__magic_name__ :List[str] = self.out_layers[i].bias
if i == 0:
__magic_name__ :Union[str, Any] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
__magic_name__ :Dict = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
__magic_name__ , __magic_name__ , __magic_name__ :str = weights[0], biases[0], self.out_projs[0]
__magic_name__ :Dict = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Optional[int] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
__magic_name__ :Tuple = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
__magic_name__ :str = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
__magic_name__ , __magic_name__ :List[str] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
__magic_name__ :Tuple = head_logprob[:, : self.cutoffs[0]]
else:
__magic_name__ , __magic_name__ , __magic_name__ :Any = weights[i], biases[i], self.out_projs[i]
__magic_name__ :Union[str, Any] = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Optional[Any] = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
__magic_name__ :Any = head_logprob[:, -i] + tail_logprob_i
__magic_name__ :Union[str, Any] = logprob_i
return out
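# The class above implements an adaptive (clustered) softmax head. A minimal,
# self-contained sketch of the same idea using PyTorch's built-in
# ``nn.AdaptiveLogSoftmaxWithLoss`` (vocabulary size, cutoffs, and shapes are
# illustrative assumptions, not values taken from this file):
def _adaptive_softmax_demo():
    n_token, d_embed = 1000, 64
    crit = nn.AdaptiveLogSoftmaxWithLoss(d_embed, n_token, cutoffs=[100, 500], div_value=4.0)
    hidden = torch.randn(32, d_embed)            # flattened (batch * seq, d_embed)
    labels = torch.randint(0, n_token, (32,))
    out = crit(hidden, labels)                   # out.output: log-prob of each target token
    return out.loss                              # mean negative log-likelihood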
| 0 |
"""simple docstring"""
import operator as op
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = lambda _snake_case , _snake_case : int(x / y ) # noqa: E731 integer division operation
UpperCAmelCase = {
"""^""": op.pow,
"""*""": op.mul,
"""/""": div,
"""+""": op.add,
"""-""": op.sub,
} # operators & their respective operation
# print table header
print("""Symbol""".center(8 ) , """Action""".center(12 ) , """Stack""" , sep=""" | """ )
print("""-""" * (30 + len(_snake_case )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(_snake_case ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ("""push(""" + x + """)""").ljust(12 ) , """,""".join(_snake_case ) , sep=""" | """ )
else:
UpperCAmelCase = stack.pop() # pop stack
# output in tabular format
print("""""".rjust(8 ) , ("""pop(""" + b + """)""").ljust(12 ) , """,""".join(_snake_case ) , sep=""" | """ )
UpperCAmelCase = stack.pop() # pop stack
# output in tabular format
print("""""".rjust(8 ) , ("""pop(""" + a + """)""").ljust(12 ) , """,""".join(_snake_case ) , sep=""" | """ )
stack.append(
str(opr[x](int(_snake_case ) , int(_snake_case ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ("""push(""" + a + x + b + """)""").ljust(12 ) , """,""".join(_snake_case ) , sep=""" | """ , )
return int(stack[0] )
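# A compact, self-contained variant of the same stack evaluation (no table
# printing; the operator set mirrors the dictionary above, and the sample
# expression is illustrative):
def _eval_postfix(tokens):
    ops = {"^": op.pow, "*": op.mul, "/": lambda x, y: int(x / y), "+": op.add, "-": op.sub}
    stack = []
    for tok in tokens:
        if tok.isdigit():
            stack.append(int(tok))
        else:
            b, a = stack.pop(), stack.pop()   # right operand is popped first
            stack.append(ops[tok](a, b))
    return stack[0]
# e.g. _eval_postfix("5 6 9 * +".split()) == 5 + 6 * 9 == 59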
if __name__ == "__main__":
_UpperCamelCase = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix))
| 341 | 0 |
def snake_case__ ( UpperCAmelCase : float , UpperCAmelCase : list[float] ):
if discount_rate < 0:
raise ValueError("Discount rate cannot be negative" )
if not cash_flows:
raise ValueError("Cash flows list cannot be empty" )
lowerCAmelCase__ :List[Any] = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(UpperCAmelCase ) )
return round(UpperCAmelCase , ndigits=2 )
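# A worked check of the NPV formula this function is meant to compute (numbers
# are illustrative): with rate 0.10 and flows [-1000, 500, 600],
# NPV = -1000/1.1**0 + 500/1.1**1 + 600/1.1**2 ≈ -49.59.
def _npv_check():
    rate, flows = 0.10, [-1000, 500, 600]
    return round(sum(cf / (1 + rate) ** i for i, cf in enumerate(flows)), 2)  # -> -49.59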
if __name__ == "__main__":
import doctest
doctest.testmod()
| 111 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Dict = logging.get_logger(__name__)
_a : Optional[Any] = {
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class _UpperCAmelCase ( _A ):
"""simple docstring"""
A = '''swin2sr'''
A = {
'''hidden_size''': '''embed_dim''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowerCAmelCase=64 , _lowerCAmelCase=1 , _lowerCAmelCase=3 , _lowerCAmelCase=180 , _lowerCAmelCase=[6, 6, 6, 6, 6, 6] , _lowerCAmelCase=[6, 6, 6, 6, 6, 6] , _lowerCAmelCase=8 , _lowerCAmelCase=2.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=0.02 , _lowerCAmelCase=1e-5 , _lowerCAmelCase=2 , _lowerCAmelCase=1.0 , _lowerCAmelCase="1conv" , _lowerCAmelCase="pixelshuffle" , **_lowerCAmelCase , ):
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
lowerCAmelCase__ :Union[str, Any] = image_size
lowerCAmelCase__ :List[Any] = patch_size
lowerCAmelCase__ :Any = num_channels
lowerCAmelCase__ :Union[str, Any] = embed_dim
lowerCAmelCase__ :Optional[int] = depths
lowerCAmelCase__ :int = len(_lowerCAmelCase )
lowerCAmelCase__ :List[str] = num_heads
lowerCAmelCase__ :str = window_size
lowerCAmelCase__ :List[str] = mlp_ratio
lowerCAmelCase__ :List[Any] = qkv_bias
lowerCAmelCase__ :Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ :Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ :Dict = drop_path_rate
lowerCAmelCase__ :Tuple = hidden_act
lowerCAmelCase__ :Dict = use_absolute_embeddings
lowerCAmelCase__ :Tuple = layer_norm_eps
lowerCAmelCase__ :Dict = initializer_range
lowerCAmelCase__ :Optional[int] = upscale
lowerCAmelCase__ :Optional[Any] = img_range
lowerCAmelCase__ :List[str] = resi_connection
lowerCAmelCase__ :Union[str, Any] = upsampler
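# The ``attribute_map`` above aliases generic config names onto Swin2SR-specific
# ones. A toy sketch of how that mechanism behaves on any PretrainedConfig
# subclass (the class and values below are illustrative, independent of this
# config):
class _ToyConfig(PretrainedConfig):
    model_type = "toy"
    attribute_map = {"hidden_size": "embed_dim"}

    def __init__(self, embed_dim=180, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim

# _ToyConfig().hidden_size == 180  (attribute reads are redirected to embed_dim)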
| 111 | 1 |
def a__ ( _UpperCamelCase : int ,_UpperCamelCase : int ):
while a != 0:
__lowerCamelCase ,__lowerCamelCase = b % a, a
return b
def a__ ( _UpperCamelCase : int ,_UpperCamelCase : int ):
if gcd(_UpperCamelCase ,_UpperCamelCase ) != 1:
__lowerCamelCase = F"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = 1, 0, a
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = 0, 1, m
while va != 0:
__lowerCamelCase = ua // va
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
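# Cross-check of the same computation via Python 3.8+'s built-in three-argument
# ``pow`` (sample values are illustrative): pow(3, -1, 7) == 5, since (3 * 5) % 7 == 1.
def _mod_inverse_check(a: int, m: int) -> int:
    return pow(a, -1, m)  # raises ValueError when gcd(a, m) != 1, matching the guard above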
| 175 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class __lowerCAmelCase ( lowerCAmelCase__ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=64 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=1 , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = q_groups
__lowerCamelCase = k_groups
__lowerCamelCase = v_groups
__lowerCamelCase = post_attention_groups
__lowerCamelCase = intermediate_groups
__lowerCamelCase = output_groups
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = SqueezeBertModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = SqueezeBertForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = SqueezeBertForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = SqueezeBertForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = SqueezeBertForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_choices
__lowerCamelCase = SqueezeBertForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
((__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase)) = config_and_inputs
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCAmelCase__ = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = SqueezeBertModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , dim=37 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*__UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = SqueezeBertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
__lowerCamelCase = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
__lowerCamelCase = model(__UpperCAmelCase )[0]
__lowerCamelCase = torch.Size((1, 3) )
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-4 ) )
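# A minimal stand-alone inference sketch in the spirit of the integration test
# above (same public checkpoint; the premise/hypothesis pair is illustrative
# and network access is assumed):
def _squeezebert_mnli_demo():
    import torch
    from transformers import SqueezeBertForSequenceClassification, SqueezeBertTokenizer

    tok = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
    model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
    inputs = tok("A soccer game with multiple males playing.",
                 "Some men are playing a sport.", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return logits.argmax(-1)  # predicted MNLI label id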
| 175 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Optional[int] = logging.get_logger(__name__)
lowercase : Tuple = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class A__ ( UpperCAmelCase__ ):
"""simple docstring"""
__A : Tuple = "decision_transformer"
__A : Dict = ["past_key_values"]
__A : int = {
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , lowercase=17 , lowercase=4 , lowercase=128 , lowercase=4096 , lowercase=True , lowercase=1 , lowercase=1024 , lowercase=3 , lowercase=1 , lowercase=None , lowercase="relu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=1e-5 , lowercase=0.02 , lowercase=True , lowercase=True , lowercase=5_0256 , lowercase=5_0256 , lowercase=False , lowercase=False , **lowercase , ) -> str:
'''simple docstring'''
a__ : Optional[int] = state_dim
a__ : List[Any] = act_dim
a__ : Dict = hidden_size
a__ : Union[str, Any] = max_ep_len
a__ : Dict = action_tanh
a__ : List[str] = vocab_size
a__ : Any = n_positions
a__ : Tuple = n_layer
a__ : List[str] = n_head
a__ : str = n_inner
a__ : int = activation_function
a__ : Optional[int] = resid_pdrop
a__ : Any = embd_pdrop
a__ : Union[str, Any] = attn_pdrop
a__ : Optional[Any] = layer_norm_epsilon
a__ : Tuple = initializer_range
a__ : str = scale_attn_weights
a__ : Tuple = use_cache
a__ : str = scale_attn_by_inverse_layer_idx
a__ : Union[str, Any] = reorder_and_upcast_attn
a__ : List[Any] = bos_token_id
a__ : int = eos_token_id
super().__init__(bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__)
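# Minimal usage sketch (the public transformers class names this masked config
# corresponds to are assumed; dimensions are illustrative):
def _decision_transformer_demo():
    from transformers import DecisionTransformerConfig, DecisionTransformerModel

    cfg = DecisionTransformerConfig(state_dim=17, act_dim=6)
    return DecisionTransformerModel(cfg)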
| 718 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : Tuple = ['''image_processor''', '''tokenizer''']
__A : Any = '''ChineseCLIPImageProcessor'''
__A : Tuple = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , lowercase=None , lowercase=None , **lowercase) -> List[str]:
'''simple docstring'''
a__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowercase , )
a__ : Optional[Any] = kwargs.pop('feature_extractor')
a__ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(lowercase , lowercase)
a__ : List[str] = self.image_processor
def __call__( self , lowercase=None , lowercase=None , lowercase=None , **lowercase) -> List[str]:
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.')
if text is not None:
a__ : str = self.tokenizer(lowercase , return_tensors=lowercase , **lowercase)
if images is not None:
a__ : Optional[Any] = self.image_processor(lowercase , return_tensors=lowercase , **lowercase)
if text is not None and images is not None:
a__ : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase) , tensor_type=lowercase)
def __lowercase ( self , *lowercase , **lowercase) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase , **lowercase)
def __lowercase ( self , *lowercase , **lowercase) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*lowercase , **lowercase)
@property
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
a__ : List[Any] = self.tokenizer.model_input_names
a__ : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def __lowercase ( self) -> Tuple:
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase , )
return self.image_processor_class
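# End-user sketch of the processor defined above (the public checkpoint name is
# an assumption; network access required):
def _chinese_clip_processor_demo():
    from transformers import ChineseCLIPProcessor

    proc = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    return proc(text=["一只猫"], return_tensors="pt")  # tokenized text inputs only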
| 392 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def _UpperCAmelCase ( *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""")
lowercase_ = [
{
"""image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png"""),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = vqa_pipeline(lowerCAmelCase_ , top_k=1)
self.assertEqual(
lowerCAmelCase_ , [
[{"""score""": ANY(lowerCAmelCase_), """answer""": ANY(lowerCAmelCase_)}],
[{"""score""": ANY(lowerCAmelCase_), """answer""": ANY(lowerCAmelCase_)}],
] , )
@require_torch
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""")
lowercase_ = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
lowercase_ = """How many cats are there?"""
lowercase_ = vqa_pipeline(image=lowerCAmelCase_ , question="""How many cats are there?""" , top_k=2)
self.assertEqual(
lowerCAmelCase_ , [{"""score""": ANY(lowerCAmelCase_), """answer""": ANY(lowerCAmelCase_)}, {"""score""": ANY(lowerCAmelCase_), """answer""": ANY(lowerCAmelCase_)}])
lowercase_ = vqa_pipeline({"""image""": image, """question""": question} , top_k=2)
self.assertEqual(
lowerCAmelCase_ , [{"""score""": ANY(lowerCAmelCase_), """answer""": ANY(lowerCAmelCase_)}, {"""score""": ANY(lowerCAmelCase_), """answer""": ANY(lowerCAmelCase_)}])
@slow
@require_torch
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""")
lowercase_ = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
lowercase_ = """How many cats are there?"""
lowercase_ = vqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}])
lowercase_ = vqa_pipeline({"""image""": image, """question""": question} , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}])
lowercase_ = vqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2)
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4) , [[{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2 , )
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""")
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
pass
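# End-user sketch of the pipeline exercised above (checkpoint taken from the
# slow test; the local image path is a placeholder):
def _vqa_demo():
    vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    return vqa(image="cats.png", question="How many cats are there?", top_k=2)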
| 567 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase : int = logging.get_logger(__name__)
UpperCAmelCase : Tuple = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
'''simple docstring'''
for attribute in key.split(""".""" ):
lowercase_ = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
lowercase_ = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
lowercase_ = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowercase_ = value
elif weight_type == "weight_g":
lowercase_ = value
elif weight_type == "weight_v":
lowercase_ = value
elif weight_type == "bias":
lowercase_ = value
else:
lowercase_ = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = []
lowercase_ = fairseq_model.state_dict()
lowercase_ = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
lowercase_ = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
lowercase_ = True
else:
for key, mapped_key in MAPPING.items():
lowercase_ = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
lowercase_ = True
if "*" in mapped_key:
lowercase_ = name.split(__lowerCAmelCase )[0].split(""".""" )[-2]
lowercase_ = mapped_key.replace("""*""" , __lowerCAmelCase )
if "weight_g" in name:
lowercase_ = """weight_g"""
elif "weight_v" in name:
lowercase_ = """weight_v"""
elif "weight" in name:
lowercase_ = """weight"""
elif "bias" in name:
lowercase_ = """bias"""
else:
lowercase_ = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = full_name.split("""conv_layers.""" )[-1]
lowercase_ = name.split(""".""" )
lowercase_ = int(items[0] )
lowercase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowercase_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowercase_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowercase_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowercase_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = SEWConfig()
if is_finetuned:
lowercase_ = model.wav_encoder.wav_model.cfg
else:
lowercase_ = model.cfg
lowercase_ = fs_config.conv_bias
lowercase_ = eval(fs_config.conv_feature_layers )
lowercase_ = [x[0] for x in conv_layers]
lowercase_ = [x[1] for x in conv_layers]
lowercase_ = [x[2] for x in conv_layers]
lowercase_ = """gelu"""
lowercase_ = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
lowercase_ = 0.0
lowercase_ = fs_config.activation_fn.name
lowercase_ = fs_config.encoder_embed_dim
lowercase_ = 0.02
lowercase_ = fs_config.encoder_ffn_embed_dim
lowercase_ = 1E-5
lowercase_ = fs_config.encoder_layerdrop
lowercase_ = fs_config.encoder_attention_heads
lowercase_ = fs_config.conv_pos_groups
lowercase_ = fs_config.conv_pos
lowercase_ = len(__lowerCAmelCase )
lowercase_ = fs_config.encoder_layers
lowercase_ = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
lowercase_ = model.cfg
lowercase_ = fs_config.final_dropout
lowercase_ = fs_config.layerdrop
lowercase_ = fs_config.activation_dropout
lowercase_ = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
lowercase_ = fs_config.attention_dropout
lowercase_ = fs_config.dropout_input
lowercase_ = fs_config.dropout
lowercase_ = fs_config.mask_channel_length
lowercase_ = fs_config.mask_channel_prob
lowercase_ = fs_config.mask_length
lowercase_ = fs_config.mask_prob
lowercase_ = """Wav2Vec2FeatureExtractor"""
lowercase_ = """Wav2Vec2CTCTokenizer"""
return config
@torch.no_grad()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True ) -> Union[str, Any]:
'''simple docstring'''
if is_finetuned:
lowercase_ , lowercase_ , lowercase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
lowercase_ , lowercase_ , lowercase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
lowercase_ = SEWConfig.from_pretrained(__lowerCAmelCase )
else:
lowercase_ = convert_config(model[0] , __lowerCAmelCase )
lowercase_ = model[0].eval()
lowercase_ = True if config.feat_extract_norm == """layer""" else False
lowercase_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
if is_finetuned:
if dict_path:
lowercase_ = Dictionary.load(__lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowercase_ = target_dict.pad_index
lowercase_ = target_dict.bos_index
lowercase_ = target_dict.pad_index
lowercase_ = target_dict.bos_index
lowercase_ = target_dict.eos_index
lowercase_ = len(target_dict.symbols )
lowercase_ = os.path.join(__lowerCAmelCase , """vocab.json""" )
if not os.path.isdir(__lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , __lowerCAmelCase )
lowercase_ = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCAmelCase , )
lowercase_ = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
lowercase_ = SEWForCTC(__lowerCAmelCase )
else:
lowercase_ = SEWModel(__lowerCAmelCase )
feature_extractor.save_pretrained(__lowerCAmelCase )
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
UpperCAmelCase : str = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
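# Example invocation (the script name and all paths are placeholders; the flags
# match the argparse definition above):
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path /path/to/sew_checkpoint.pt \
#       --pytorch_dump_folder_path ./sew-hf \
#       --dict_path /path/to/dict.ltr.txt \
#       --is_finetuned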
| 567 | 1 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : int ):
if digit_amount > 0:
return round(number - int(UpperCAmelCase_ ) , UpperCAmelCase_ )
return number - int(UpperCAmelCase_ )
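# Worked check (illustrative): for 35.345 with one digit kept,
# 35.345 - int(35.345) = 0.345 and round(0.345, 1) = 0.3.
def _decimal_isolate_check():
    return round(35.345 - int(35.345), 1)  # -> 0.3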
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 718 |
"""simple docstring"""
from itertools import count
def _snake_case ( UpperCAmelCase_ : int = 50 ):
A__ = [1] * min_block_length
for n in count(UpperCAmelCase_ ):
fill_count_functions.append(1 )
for block_length in range(UpperCAmelCase_ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 100_0000:
break
return n
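# Context for the tabulation above (a sketch of the intent, assuming this is
# Project Euler #115): fill_count_functions[n] counts the ways to fill a row of
# length n with blocks of length >= min_block_length separated by at least one
# empty square, and the answer is the least n where that count first exceeds
# 1_000_000.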
if __name__ == "__main__":
print(f"""{solution() = }""")
| 500 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
__magic_name__ : str = None
__magic_name__ : Union[str, Any] = logging.get_logger(__name__)
__magic_name__ : str = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__magic_name__ : Dict = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
},
'tokenizer_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
},
}
__magic_name__ : Union[str, Any] = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
__magic_name__ : Optional[Any] = '▁'
# Segments (not really needed)
__magic_name__ : Tuple = 0
__magic_name__ : List[Any] = 1
__magic_name__ : Tuple = 2
__magic_name__ : Any = 3
__magic_name__ : str = 4
class __snake_case (lowerCamelCase ):
__a = VOCAB_FILES_NAMES
__a = PRETRAINED_VOCAB_FILES_MAP
__a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a = '''left'''
__a = XLNetTokenizer
def __init__( self: List[Any] , A_: Tuple=None , A_: Optional[Any]=None , A_: Tuple=False , A_: Optional[int]=True , A_: int=False , A_: Tuple="<s>" , A_: Dict="</s>" , A_: Union[str, Any]="<unk>" , A_: str="<sep>" , A_: Union[str, Any]="<pad>" , A_: int="<cls>" , A_: Dict="<mask>" , A_: str=["<eop>", "<eod>"] , **A_: Tuple , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
__lowerCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
vocab_file=A_ , tokenizer_file=A_ , do_lower_case=A_ , remove_space=A_ , keep_accents=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , additional_special_tokens=A_ , **A_ , )
__lowerCamelCase = 3
__lowerCamelCase = do_lower_case
__lowerCamelCase = remove_space
__lowerCamelCase = keep_accents
__lowerCamelCase = vocab_file
__lowerCamelCase = False if not self.vocab_file else True
def __a ( self: List[Any] , A_: List[int] , A_: Optional[List[int]] = None ):
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __a ( self: Tuple , A_: List[int] , A_: Optional[List[int]] = None ):
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __a ( self: List[Any] , A_: str , A_: Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(A_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__lowerCamelCase = os.path.join(
A_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file , A_ )
return (out_vocab_file,)
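# Minimal usage sketch of the fast tokenizer defined above (public checkpoint;
# network access assumed):
def _xlnet_fast_demo():
    from transformers import XLNetTokenizerFast

    tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
    return tok("Hello world", return_tensors="pt")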
| 281 |
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ : Dict = get_tests_dir('fixtures/test_sentencepiece.model')
__magic_name__ : int = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
__magic_name__ : Any = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class __snake_case (lowerCamelCase , unittest.TestCase ):
__a = CamembertTokenizer
__a = CamembertTokenizerFast
__a = True
__a = True
def __a ( self: List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase = CamembertTokenizer(A_ )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self: Union[str, Any] ):
__lowerCamelCase = """<pad>"""
__lowerCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def __a ( self: Any ):
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(A_ ) , 10_04 )
def __a ( self: Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_05 )
def __a ( self: Optional[Any] ):
__lowerCamelCase = CamembertTokenizer(A_ )
tokenizer.save_pretrained(self.tmpdirname )
__lowerCamelCase = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
__lowerCamelCase = """I was born in 92000, and this is falsé."""
__lowerCamelCase = tokenizer.encode(A_ )
__lowerCamelCase = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
__lowerCamelCase = tokenizer.encode(A_ , add_special_tokens=A_ )
__lowerCamelCase = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
        # <unk> tokens are not the same for `rust` as for `slow`.
        # Because spm gives back the raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
__lowerCamelCase = tokenizer.convert_ids_to_tokens(A_ )
__lowerCamelCase = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
def __a ( self: List[str] ):
if not self.test_rust_tokenizer:
return
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = """I was born in 92000, and this is falsé."""
__lowerCamelCase = tokenizer.tokenize(A_ )
__lowerCamelCase = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
__lowerCamelCase = tokenizer.encode(A_ , add_special_tokens=A_ )
__lowerCamelCase = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = tokenizer.encode(A_ )
__lowerCamelCase = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
@slow
def __a ( self: Optional[Any] ):
# fmt: off
__lowerCamelCase = {"""input_ids""": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # CamemBERT is a French model, so we also use French texts.
__lowerCamelCase = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=A_ , )
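# A stand-alone slow/fast parity check in the spirit of the tests above
# (public checkpoint; network access assumed):
def _camembert_parity_demo():
    text = "I was born in 92000, and this is falsé."
    slow = CamembertTokenizer.from_pretrained("camembert-base")
    fast = CamembertTokenizerFast.from_pretrained("camembert-base")
    return slow.encode(text) == fast.encode(text)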
| 281 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class lowercase__ (unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : Optional[Any] ):
snake_case__ : Optional[Any] = tempfile.mkdtemp()
snake_case__ : Dict = BlipImageProcessor()
snake_case__ : Union[str, Any] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
snake_case__ : Optional[int] = BlipaProcessor(__lowerCAmelCase , __lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def lowercase ( self : str , **__a : Optional[int] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).tokenizer
def lowercase ( self : Optional[int] , **__a : List[Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).image_processor
def lowercase ( self : str ):
shutil.rmtree(self.tmpdirname )
def lowercase ( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
snake_case__ : List[Any] = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase ( self : Union[str, Any] ):
snake_case__ : Dict = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case__ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
snake_case__ : Dict = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
snake_case__ : List[str] = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def lowercase ( self : List[Any] ):
snake_case__ : List[str] = self.get_image_processor()
snake_case__ : str = self.get_tokenizer()
snake_case__ : Any = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
snake_case__ : Union[str, Any] = self.prepare_image_inputs()
snake_case__ : Union[str, Any] = image_processor(__lowerCAmelCase , return_tensors="""np""" )
snake_case__ : Any = processor(images=__lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase ( self : Optional[int] ):
snake_case__ : Optional[int] = self.get_image_processor()
snake_case__ : Dict = self.get_tokenizer()
snake_case__ : int = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
snake_case__ : Optional[Any] = """lower newer"""
snake_case__ : Optional[Any] = processor(text=__lowerCAmelCase )
snake_case__ : Union[str, Any] = tokenizer(__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase ( self : str ):
snake_case__ : List[Any] = self.get_image_processor()
snake_case__ : Tuple = self.get_tokenizer()
snake_case__ : List[str] = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
snake_case__ : List[str] = """lower newer"""
snake_case__ : List[str] = self.prepare_image_inputs()
snake_case__ : Optional[Any] = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def lowercase ( self : int ):
snake_case__ : Dict = self.get_image_processor()
snake_case__ : Optional[Any] = self.get_tokenizer()
snake_case__ : int = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
snake_case__ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case__ : List[str] = processor.batch_decode(__lowerCAmelCase )
snake_case__ : List[Any] = tokenizer.batch_decode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def lowercase ( self : Optional[int] ):
snake_case__ : Tuple = self.get_image_processor()
snake_case__ : int = self.get_tokenizer()
snake_case__ : Optional[int] = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
snake_case__ : List[str] = """lower newer"""
snake_case__ : Dict = self.prepare_image_inputs()
snake_case__ : str = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
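# Minimal end-user sketch of the processor this test suite covers (the
# checkpoint name is an assumption; network access required):
def _blip2_processor_demo():
    from transformers import Blip2Processor

    proc = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
    return proc(text="a photo of", return_tensors="pt")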
| 707 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowercase_: Union[str, Any] = TypeVar('T')
class lowercase__ (Generic[T] ):
"""simple docstring"""
def __init__( self : List[Any] , __a : list[T] , __a : Callable[[T, T], T] ):
snake_case__ : Any | T = None
snake_case__ : int = len(__a )
snake_case__ : list[T] = [any_type for _ in range(self.N )] + arr
snake_case__ : Tuple = fnc
self.build()
def lowercase ( self : int ):
for p in range(self.N - 1 , 0 , -1 ):
snake_case__ : Tuple = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowercase ( self : Optional[Any] , __a : int , __a : T ):
p += self.N
snake_case__ : Optional[int] = v
while p > 1:
snake_case__ : int = p // 2
snake_case__ : Dict = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowercase ( self : int , __a : int , __a : int ): # noqa: E741
snake_case__ , snake_case__ : List[Any] = l + self.N, r + self.N
snake_case__ : T | None = None
while l <= r:
if l % 2 == 1:
snake_case__ : List[str] = self.st[l] if res is None else self.fn(__a , self.st[l] )
if r % 2 == 0:
snake_case__ : Any = self.st[r] if res is None else self.fn(__a , self.st[r] )
snake_case__ , snake_case__ : Tuple = (l + 1) // 2, (r - 1) // 2
return res
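# Worked trace (illustrative): for arr = [1, 5, 2] and fnc = min, build() fills
# st = [_, 1, 2, 1, 5, 2] (root at index 1, leaves at indices N..2N-1), so
# query(0, 1) combines leaves st[3] and st[4] and returns 1; both update and
# query walk a single root-to-leaf path, i.e. O(log N).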
if __name__ == "__main__":
    from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments():
        """Check every (i, j) range against a brute-force reduce."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
    test_all_segments()
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
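# --- illustrative sketch (added; not part of the original script) -----------
# How the 1-based backing array looks for a tiny input: leaves sit in
# st[N:2N] and node p holds fn(st[2p], st[2p + 1]); st[0] stays unused.
if __name__ == "__main__":
    demo = SegmentTree([5, 2, 8, 1], min)
    assert demo.st == [None, 1, 2, 1, 5, 2, 8, 1]
    assert demo.query(1, 2) == 2  # min over [2, 8]
    demo.update(3, 0)  # the array conceptually becomes [5, 2, 8, 0]
    assert demo.query(0, 3) == 0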
| 127 | 0 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""
    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
    """Folder-based builder for audio classification datasets."""
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
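# --- usage sketch (added; the data_dir below is hypothetical) ----------------
# `load_dataset` dispatches to this builder for the "audiofolder" loader and
# infers labels from subdirectory names:
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="./my_clips")  # my_clips/<label>/*.wav
#   ds["train"][0]  # {"audio": {"path": ..., "array": ..., "sampling_rate": ...}, "label": 0}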
| 603 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'realm'
    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3_072,
        hidden_act='gelu_new',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13_353_718,
        searcher_beam_size=5_000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
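# --- illustrative sketch (added; exercises only the config object) -----------
if __name__ == "__main__":
    cfg = RealmConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=64, intermediate_size=128)
    assert cfg.model_type == "realm"
    assert cfg.reader_beam_size == 5  # default is kept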
| 637 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
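# Quick check (added): the helper maps pixel dimensions to movq latent
# dimensions, rounding up to a multiple of the scale factor.
assert downscale_height_and_width(512, 500) == (64, 64)
assert downscale_height_and_width(768, 768) == (96, 96)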
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
def __init__( self :List[str] , _lowercase :UNetaDConditionModel , _lowercase :DDPMScheduler , _lowercase :VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
lowercase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase ( self :Optional[int] , _lowercase :List[Any] , _lowercase :Any , _lowercase :Tuple , _lowercase :int , _lowercase :str , _lowercase :List[Any] ):
'''simple docstring'''
if latents is None:
lowercase__ = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
lowercase__ = latents.to(_lowercase )
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase ( self :Any , _lowercase :Dict=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase__ = torch.device(f'''cuda:{gpu_id}''' )
lowercase__ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def UpperCAmelCase ( self :Union[str, Any] , _lowercase :List[Any]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase__ = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase__ = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase__ , lowercase__ = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self :Any , _lowercase :Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowercase :Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowercase :torch.FloatTensor , _lowercase :int = 5_12 , _lowercase :int = 5_12 , _lowercase :int = 1_00 , _lowercase :float = 4.0 , _lowercase :int = 1 , _lowercase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase :Optional[torch.FloatTensor] = None , _lowercase :Optional[str] = "pil" , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = self._execution_device
lowercase__ = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
lowercase__ = torch.cat(_lowercase , dim=0 )
if isinstance(_lowercase , _lowercase ):
lowercase__ = torch.cat(_lowercase , dim=0 )
if isinstance(_lowercase , _lowercase ):
lowercase__ = torch.cat(_lowercase , dim=0 )
lowercase__ = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
lowercase__ = image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase__ = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase__ = hint.repeat_interleave(_lowercase , dim=0 )
lowercase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
lowercase__ = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowercase__ = self.scheduler.timesteps
lowercase__ = self.movq.config.latent_channels
lowercase__ , lowercase__ = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
# create initial latent
lowercase__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowercase , _lowercase , _lowercase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = {"image_embeds": image_embeds, "hint": hint}
lowercase__ = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.split(latents.shape[1] , dim=1 )
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ , lowercase__ = variance_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase__ , lowercase__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowercase__ = self.movq.decode(_lowercase , force_not_quantize=_lowercase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
lowercase__ = image * 0.5 + 0.5
lowercase__ = image.clamp(0 , 1 )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
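# --- numeric illustration (added; not part of the pipeline) ------------------
# The guidance step above computes uncond + g * (text - uncond); with g = 4.0
# an unconditional 0.1 and a text-conditioned 0.5 are pushed to 1.7.
if __name__ == "__main__":
    _uncond, _text = torch.tensor([0.1]), torch.tensor([0.5])
    assert torch.allclose(_uncond + 4.0 * (_text - _uncond), torch.tensor([1.7]))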
| 611 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = 'roberta-prelayernorm'
    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
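# --- illustrative sketch (added) ----------------------------------------------
if __name__ == "__main__":
    onnx_cfg = RobertaPreLayerNormOnnxConfig(RobertaPreLayerNormConfig(), task="default")
    assert list(onnx_cfg.inputs) == ["input_ids", "attention_mask"]
    assert onnx_cfg.inputs["input_ids"] == {0: "batch", 1: "sequence"}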
| 611 | 1 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
    def test_betas(self):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE )
    def test_schedules(self):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE )
    def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
    def test_full_loop_no_noise(self):
snake_case_ : Optional[int] = self.scheduler_classes[0]
snake_case_ : str = self.get_scheduler_config()
snake_case_ : Tuple = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(self.num_inference_steps )
snake_case_ : Union[str, Any] = self.dummy_model()
snake_case_ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case_ : Union[str, Any] = sample.to(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ : str = scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : int = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : int = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = output.prev_sample
snake_case_ : Optional[int] = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
snake_case_ : List[Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
    def test_full_loop_with_v_prediction(self):
snake_case_ : Union[str, Any] = self.scheduler_classes[0]
snake_case_ : Tuple = self.get_scheduler_config(prediction_type="v_prediction" )
snake_case_ : str = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(self.num_inference_steps )
snake_case_ : Optional[int] = self.dummy_model()
snake_case_ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case_ : List[str] = sample.to(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ : Dict = scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : int = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : str = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = output.prev_sample
snake_case_ : List[str] = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
snake_case_ : int = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3
    def test_full_loop_device(self):
snake_case_ : List[str] = self.scheduler_classes[0]
snake_case_ : Union[str, Any] = self.get_scheduler_config()
snake_case_ : Optional[Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(self.num_inference_steps , device=_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = self.dummy_model()
snake_case_ : Optional[Any] = self.dummy_sample_deter.to(_SCREAMING_SNAKE_CASE ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
snake_case_ : Dict = scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = output.prev_sample
snake_case_ : Any = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
snake_case_ : List[str] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
snake_case_ : Any = self.scheduler_classes[0]
snake_case_ : Union[str, Any] = self.get_scheduler_config()
snake_case_ : Any = scheduler_class(**_SCREAMING_SNAKE_CASE , use_karras_sigmas=_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(self.num_inference_steps , device=_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = self.dummy_model()
snake_case_ : int = self.dummy_sample_deter.to(_SCREAMING_SNAKE_CASE ) * scheduler.init_noise_sigma
snake_case_ : int = sample.to(_SCREAMING_SNAKE_CASE )
for t in scheduler.timesteps:
snake_case_ : Optional[int] = scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Any = output.prev_sample
snake_case_ : Optional[Any] = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
snake_case_ : List[Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
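# --- standalone sketch (added; requires torchsde, like the tests above) ------
def _denoise_loop_sketch():
    # The scale -> predict -> step pattern the tests assert, in isolation;
    # a zero tensor stands in for a real UNet call.
    scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
    scheduler.set_timesteps(10)
    sample = torch.ones(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample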
| 568 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
    'tokenizer_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/rembert': 256,
}
SPIECE_UNDERLINE = '▁'
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
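# --- worked example (added; IDs are placeholders: cls=101, sep=102) ------------
#   build_inputs_with_special_tokens([7, 8])           -> [101, 7, 8, 102]
#   build_inputs_with_special_tokens([7, 8], [9])      -> [101, 7, 8, 102, 9, 102]
#   create_token_type_ids_from_sequences([7, 8], [9])  -> [0, 0, 0, 0, 1, 1]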
| 568 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def lowercase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ) -> List[str]:
_snake_case : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_snake_case : str = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def lowercase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ) -> Any:
for i in range(config.num_hidden_layers ):
if base_model:
_snake_case : int = """"""
else:
_snake_case : int = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case : Dict = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
_snake_case : Any = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Tuple = in_proj_weight[
: config.hidden_size, :
]
_snake_case : List[Any] = in_proj_bias[: config.hidden_size]
_snake_case : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case : List[str] = in_proj_weight[
-config.hidden_size :, :
]
_snake_case : Any = in_proj_bias[-config.hidden_size :]
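# --- shape sketch (added; not part of the conversion script) -----------------
def _qkv_split_sketch(hidden: int = 4):
    # timm stores one fused (3*hidden, hidden) qkv matrix; rows [0:h] are Q,
    # rows [h:2h] are K and the last h rows are V, as read_in_q_k_v does above.
    qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q, k, v = qkv[:hidden, :], qkv[hidden : 2 * hidden, :], qkv[-hidden:, :]
    assert q.shape == k.shape == v.shape == (hidden, hidden)
    return q, k, v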
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Dict:
_snake_case : Dict = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowercase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, Any]:
_snake_case : List[Any] = dct.pop(SCREAMING_SNAKE_CASE__ )
_snake_case : int = val
def lowercase ( ) -> Optional[Any]:
_snake_case : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_snake_case : Optional[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def lowercase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any=True ) -> List[str]:
_snake_case : str = ViTConfig()
# patch_size
if model_name[-1] == "8":
_snake_case : Optional[Any] = 8
# set labels if required
if not base_model:
_snake_case : Any = 1_000
_snake_case : List[str] = """huggingface/label-files"""
_snake_case : List[Any] = """imagenet-1k-id2label.json"""
_snake_case : Tuple = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) )
_snake_case : Any = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
_snake_case : str = idalabel
_snake_case : Tuple = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_snake_case : Optional[Any] = 384
_snake_case : Optional[int] = 1_536
_snake_case : Any = 12
_snake_case : Optional[int] = 6
# load original model from torch hub
_snake_case : Dict = torch.hub.load("""facebookresearch/dino:main""" , SCREAMING_SNAKE_CASE__ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_snake_case : Optional[int] = original_model.state_dict()
if base_model:
remove_classification_head_(SCREAMING_SNAKE_CASE__ )
_snake_case : Optional[int] = create_rename_keys(SCREAMING_SNAKE_CASE__ , base_model=SCREAMING_SNAKE_CASE__ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
read_in_q_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# load HuggingFace model
if base_model:
_snake_case : Dict = ViTModel(SCREAMING_SNAKE_CASE__ , add_pooling_layer=SCREAMING_SNAKE_CASE__ ).eval()
else:
_snake_case : str = ViTForImageClassification(SCREAMING_SNAKE_CASE__ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# Check outputs on an image, prepared by ViTImageProcessor
_snake_case : List[str] = ViTImageProcessor()
_snake_case : Optional[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
_snake_case : Optional[int] = encoding["""pixel_values"""]
_snake_case : int = model(SCREAMING_SNAKE_CASE__ )
if base_model:
_snake_case : str = original_model(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(SCREAMING_SNAKE_CASE__ , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
else:
_snake_case : Optional[int] = original_model(SCREAMING_SNAKE_CASE__ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE__ , outputs.logits , atol=1e-3 )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 198 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
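# --- worked example (added) -----------------------------------------------------
# Two 1 m^2 plates 1 µm apart: F = (ħ c π² A) / (240 d⁴) ≈ 1.3e-3 N.
if __name__ == "__main__":
    result = casimir_force(force=0, area=1.0, distance=1e-6)
    assert abs(result["force"] - 1.30e-3) < 1e-5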
| 198 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self) -> None:
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'external_deps', 'test_metrics.py'])
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)
    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)
    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()
    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 246 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class TableTransformerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'table-transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
def __init__( self : Any , __snake_case : List[Any]=True , __snake_case : List[str]=None , __snake_case : Tuple=3 , __snake_case : Optional[int]=100 , __snake_case : Tuple=6 , __snake_case : List[Any]=2048 , __snake_case : Optional[Any]=8 , __snake_case : Tuple=6 , __snake_case : Optional[int]=2048 , __snake_case : Optional[Any]=8 , __snake_case : Tuple=0.0 , __snake_case : Dict=0.0 , __snake_case : int=True , __snake_case : int="relu" , __snake_case : Dict=256 , __snake_case : Any=0.1 , __snake_case : Optional[int]=0.0 , __snake_case : Optional[Any]=0.0 , __snake_case : Any=0.02 , __snake_case : int=1.0 , __snake_case : List[str]=False , __snake_case : Optional[Any]="sine" , __snake_case : Optional[Any]="resnet50" , __snake_case : Tuple=True , __snake_case : List[str]=False , __snake_case : List[Any]=1 , __snake_case : List[str]=5 , __snake_case : List[Any]=2 , __snake_case : Optional[Any]=1 , __snake_case : str=1 , __snake_case : Union[str, Any]=5 , __snake_case : Union[str, Any]=2 , __snake_case : Union[str, Any]=0.1 , **__snake_case : Union[str, Any] , ) -> Optional[Any]:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowerCamelCase = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(__snake_case , __snake_case ):
lowerCamelCase = backbone_config.get('model_type' )
lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase = config_class.from_dict(__snake_case )
# set timm attributes to None
lowerCamelCase , lowerCamelCase , lowerCamelCase = None, None, None
lowerCamelCase = use_timm_backbone
lowerCamelCase = backbone_config
lowerCamelCase = num_channels
lowerCamelCase = num_queries
lowerCamelCase = d_model
lowerCamelCase = encoder_ffn_dim
lowerCamelCase = encoder_layers
lowerCamelCase = encoder_attention_heads
lowerCamelCase = decoder_ffn_dim
lowerCamelCase = decoder_layers
lowerCamelCase = decoder_attention_heads
lowerCamelCase = dropout
lowerCamelCase = attention_dropout
lowerCamelCase = activation_dropout
lowerCamelCase = activation_function
lowerCamelCase = init_std
lowerCamelCase = init_xavier_std
lowerCamelCase = encoder_layerdrop
lowerCamelCase = decoder_layerdrop
lowerCamelCase = encoder_layers
lowerCamelCase = auxiliary_loss
lowerCamelCase = position_embedding_type
lowerCamelCase = backbone
lowerCamelCase = use_pretrained_backbone
lowerCamelCase = dilation
# Hungarian matcher
lowerCamelCase = class_cost
lowerCamelCase = bbox_cost
lowerCamelCase = giou_cost
# Loss coefficients
lowerCamelCase = mask_loss_coefficient
lowerCamelCase = dice_loss_coefficient
lowerCamelCase = bbox_loss_coefficient
lowerCamelCase = giou_loss_coefficient
lowerCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=__snake_case , **__snake_case )
@property
    def num_attention_heads(self) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
    def hidden_size(self) -> int:
'''simple docstring'''
return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('1.11')
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
    def atol_for_validation(self) -> float:
'''simple docstring'''
return 1e-5
@property
    def default_onnx_opset(self) -> int:
'''simple docstring'''
return 12
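# --- illustrative sketch (added; uses the published class from transformers) ---
#   from transformers import TableTransformerConfig
#   cfg = TableTransformerConfig(d_model=128)
#   cfg.hidden_size == cfg.d_model == 128  # attribute_map aliases the two names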
| 246 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCamelCase__( unittest.TestCase ):
@slow
def snake_case__ ( self ) -> int:
A__ = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
A__ = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
A__ = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
A__ = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
A__ = model(_UpperCamelCase )["""last_hidden_state"""].detach()
self.assertEqual(output.shape ,_UpperCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1e-3 ) )
@slow
def snake_case__ ( self ) -> Optional[Any]:
A__ = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
A__ = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
A__ = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
A__ = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
A__ = model(_UpperCamelCase )["""last_hidden_state"""].detach()
self.assertEqual(output.shape ,_UpperCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1e-3 ) )
| 715 |
"""simple docstring"""
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent
    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
    """simple docstring"""
    html_string_1 = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
    html_string_2 = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
    return [html_string_1, html_string_2]
@require_bsa
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bsa_available() else None
    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)
    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)
        # fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)
        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]
        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
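# --- illustrative sketch (added) ------------------------------------------------
# What the extractor returns for a minimal fragment:
#   MarkupLMFeatureExtractor()('<html><body><p>hi</p></body></html>')
#   -> nodes=[['hi']], xpaths=[['/html/body/p']]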
| 536 | 0 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'Hello world! cécé herlolip'
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : int = FairseqRobertaModel.from_pretrained(UpperCamelCase__ )
roberta.eval() # disable dropout
_a : Tuple = roberta.model.encoder.sentence_encoder
_a : int = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
_a : Any = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" , UpperCamelCase__ )
_a : Union[str, Any] = XLMRobertaXLForSequenceClassification(UpperCamelCase__ ) if classification_head else XLMRobertaXLForMaskedLM(UpperCamelCase__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
_a : List[Any] = roberta_sent_encoder.embed_tokens.weight
_a : Optional[int] = roberta_sent_encoder.embed_positions.weight
_a : List[str] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
_a : List[str] = roberta_sent_encoder.layer_norm.weight
_a : Dict = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_a : BertLayer = model.roberta.encoder.layer[i]
_a : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
_a : RobertaAttention = layer.attention
_a : List[str] = roberta_layer.self_attn_layer_norm.weight
_a : List[str] = roberta_layer.self_attn_layer_norm.bias
# self attention
_a : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
_a : Union[str, Any] = roberta_layer.self_attn.q_proj.weight
_a : Any = roberta_layer.self_attn.q_proj.bias
_a : int = roberta_layer.self_attn.k_proj.weight
_a : Any = roberta_layer.self_attn.k_proj.bias
_a : Any = roberta_layer.self_attn.v_proj.weight
_a : Any = roberta_layer.self_attn.v_proj.bias
# self-attention output
_a : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
_a : str = roberta_layer.self_attn.out_proj.weight
_a : Tuple = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
_a : int = roberta_layer.final_layer_norm.weight
_a : Optional[int] = roberta_layer.final_layer_norm.bias
# intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
# end of layer
if classification_head:
_a : Dict = roberta.model.classification_heads["""mnli"""].dense.weight
_a : Optional[int] = roberta.model.classification_heads["""mnli"""].dense.bias
_a : Optional[int] = roberta.model.classification_heads["""mnli"""].out_proj.weight
_a : List[Any] = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
_a : Union[str, Any] = roberta.model.encoder.lm_head.dense.weight
_a : Tuple = roberta.model.encoder.lm_head.dense.bias
_a : Dict = roberta.model.encoder.lm_head.layer_norm.weight
_a : List[str] = roberta.model.encoder.lm_head.layer_norm.bias
_a : List[str] = roberta.model.encoder.lm_head.weight
_a : int = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
_a : torch.Tensor = roberta.encode(UpperCamelCase__ ).unsqueeze(0 ) # batch of size 1
_a : List[str] = model(UpperCamelCase__ )[0]
if classification_head:
_a : str = roberta.model.classification_heads["""mnli"""](roberta.extract_features(UpperCamelCase__ ) )
else:
_a : Optional[Any] = roberta.model(UpperCamelCase__ )[0]
print(our_output.shape , their_output.shape )
_a : Optional[int] = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
_a : List[Any] = torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(UpperCamelCase__ ).mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
_snake_case = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 389 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value: int | None = None) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self : Any ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 )
class BinarySearchTree:
    def __init__(self, root: Node | None = None) -> None:
        self.root = root
def __str__( self : Any ) -> str:
return str(self.root )
def _lowercase ( self : List[str] , UpperCAmelCase__ : Node , UpperCAmelCase__ : Node | None ) -> None:
if new_children is not None: # reset its kids
_a : Optional[Any] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(UpperCAmelCase__ ): # If it is the right children
_a : List[Any] = new_children
else:
_a : Tuple = new_children
else:
_a : Any = new_children
def _lowercase ( self : List[str] , UpperCAmelCase__ : Node ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def _lowercase ( self : str ) -> bool:
return self.root is None
def _lowercase ( self : Tuple , UpperCAmelCase__ : Optional[int] ) -> None:
_a : Tuple = Node(UpperCAmelCase__ ) # create a new Node
if self.empty(): # if Tree is empty
_a : Optional[Any] = new_node # set its root
else: # Tree is not empty
_a : Tuple = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
_a : Optional[Any] = new_node # We insert the new node in a leaf
break
else:
_a : Optional[Any] = parent_node.left
else:
if parent_node.right is None:
_a : Union[str, Any] = new_node
break
else:
_a : int = parent_node.right
_a : Any = parent_node
def _lowercase ( self : Optional[Any] , *UpperCAmelCase__ : Optional[Any] ) -> None:
for value in values:
self.__insert(UpperCAmelCase__ )
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : List[str] ) -> Node | None:
if self.empty():
raise IndexError("""Warning: Tree is empty! please use another.""" )
else:
_a : Any = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
_a : int = node.left if value < node.value else node.right
return node
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Node | None = None ) -> Node | None:
if node is None:
if self.root is None:
return None
_a : Optional[Any] = self.root
if not self.empty():
while node.right is not None:
_a : Union[str, Any] = node.right
return node
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Node | None = None ) -> Node | None:
if node is None:
_a : Union[str, Any] = self.root
if self.root is None:
return None
if not self.empty():
_a : Optional[Any] = self.root
while node.left is not None:
_a : List[str] = node.left
return node
def _lowercase ( self : int , UpperCAmelCase__ : int ) -> None:
_a : Tuple = self.search(UpperCAmelCase__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(UpperCAmelCase__ , UpperCAmelCase__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(UpperCAmelCase__ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(UpperCAmelCase__ , node.left )
else:
_a : int = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
_a : Union[str, Any] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Node | None ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : List[Any]=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def _lowercase ( self : List[str] , UpperCAmelCase__ : list , UpperCAmelCase__ : Node | None ) -> None:
if node:
self.inorder(UpperCAmelCase__ , node.left )
arr.append(node.value )
self.inorder(UpperCAmelCase__ , node.right )
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Node ) -> int:
_a : list[int] = []
self.inorder(UpperCAmelCase__ , UpperCAmelCase__ ) # append all values to list using inorder traversal
return arr[k - 1]
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
_a : Union[str, Any] = []
if curr_node is not None:
_a : Tuple = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : int = (8, 3, 6, 1, 1_0, 1_4, 1_3, 4, 7)
_a : List[Any] = BinarySearchTree()
for i in testlist:
t.insert(UpperCamelCase__ )
# Prints all the elements of the list in order traversal
print(UpperCamelCase__ )
if t.search(6 ) is not None:
print("""The value 6 exists""" )
else:
print("""The value 6 doesn't exist""" )
if t.search(-1 ) is not None:
print("""The value -1 exists""" )
else:
print("""The value -1 doesn't exist""" )
if not t.empty():
print("""Max Value: """ , t.get_max().value ) # type: ignore
print("""Min Value: """ , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(UpperCamelCase__ )
print(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
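# A minimal usage sketch for the tree above (an editorial addition, not part of the
# original module); it exercises insert/search/min/max/kth-smallest on a tiny tree.
def _example_usage() -> None:
    t = BinarySearchTree()
    t.insert(8, 3, 10, 1, 6)
    assert t.search(6) is not None  # 6 was inserted
    assert t.search(42) is None  # 42 was never inserted
    assert t.get_min().value == 1  # leftmost node
    assert t.get_max().value == 10  # rightmost node
    # sorted order is 1, 3, 6, 8, 10, so the 2nd smallest is 3
    assert t.find_kth_smallest(2, t.root) == 3
    # postorder visits both children before their parent
    assert [n.value for n in postorder(t.root)] == [1, 6, 3, 10, 8]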
| 389 | 1 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """
    Metaclass that adds the key handlers to the class
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """
        Finds and returns the selected character if it exists in the handler
        """
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds KeyHandler metaclass to the class"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
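# A small illustration (editorial addition, not in the original file): a class built
# with the KeyHandler metaclass collects its marked methods into `key_handler`,
# keyed by the key codes they were marked with. The "up"/"down" entries are assumed
# to exist in KEYMAP here.
class _DemoMenu(metaclass=KeyHandler):
    @mark(KEYMAP["up"])
    def move_up(self):
        return "up"

    @mark(KEYMAP["down"])
    def move_down(self):
        return "down"

# `_DemoMenu.key_handler` now maps the two key codes to the two methods, and
# `_DemoMenu.handle_input` dispatches on the next character read by get_character().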
| 705 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
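# The tests above hinge on deterministic seeding. A minimal sketch of the pattern
# (editorial addition): pipelines accept a CPU generator even when the model runs
# on GPU, so one seed reproduces the same latents across devices.
def _make_generator(seed: int, device: str = "cpu") -> torch.Generator:
    if str(device).startswith("mps"):
        # older torch builds cannot create a Generator on mps; fall back to the
        # global CPU generator, which torch.manual_seed returns
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)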
| 181 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs) -> None:
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
return 1e-4 | 142 |
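# A short usage sketch (editorial addition): instantiating the config with
# non-default values and inspecting the ONNX input axes declared above.
def _example_config() -> None:
    config = BeitConfig(image_size=384, use_relative_position_bias=True)
    assert config.model_type == "beit"
    assert (config.image_size // config.patch_size) ** 2 == 576  # 24 x 24 patches
    onnx_config = BeitOnnxConfig(config)
    assert list(onnx_config.inputs) == ["pixel_values"]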
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 142 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
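# Editorial sketch of the idea behind _LazyModule, using the PEP 562 module-level
# __getattr__ hook: the heavy torch-backed submodule is only imported when one of
# its names is first accessed. Illustrative only; the _LazyModule assignment above
# already installs this behaviour, so this hook would never actually be consulted.
def __getattr__(name):
    if name in _import_structure.get("modeling_megatron_bert", []):
        import importlib

        module = importlib.import_module(".modeling_megatron_bert", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")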
| 702 |
def print_max_activities(start: list[int], finish: list[int]) -> None:
    '''
    Prints a maximum set of activities that can be done by a single person,
    one at a time, assuming `finish` is sorted by finish time.
    '''
    n = len(finish)
    print('The following activities are selected:')

    # The first activity is always selected
    i = 0
    print(i, end=',')

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=',')
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
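# Worked example (editorial addition) for the arrays above: activities are taken
# greedily by earliest finish time. With start = [1, 3, 0, 5, 8, 5] and
# finish = [2, 4, 6, 7, 9, 9] the selected indices are 0, 1, 3, 4:
# 0 finishes at 2; 1 starts at 3 >= 2; 3 starts at 5 >= 4; 4 starts at 8 >= 7.
def max_activities(start: list[int], finish: list[int]) -> list[int]:
    selected = [0]
    for j in range(1, len(finish)):
        if start[j] >= finish[selected[-1]]:
            selected.append(j)
    return selected

assert max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]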
| 241 | 0 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, **kwargs):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
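# A short usage sketch (editorial addition). The checkpoint name below is only an
# illustration of the resolution order implemented above: the feature extractor
# type is read from preprocessor_config.json first, then from the model config.
def _example_usage():
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
    return feature_extractor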
| 499 |
"""simple docstring"""
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    '''
    Change the contrast of a PIL image by mapping each channel value through a
    linear contrast function centred on 128.
    '''
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
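        # Worked numbers for the call above (editorial addition): at level = 170 the
        # factor is 259 * (170 + 255) / (255 * (259 - 170)) ≈ 4.85, so channel value
        # 150 maps to int(128 + 4.85 * (150 - 128)) = 234, while 128 is a fixed point.
        factor_170 = (259 * (170 + 255)) / (255 * (259 - 170))
        assert int(128 + factor_170 * (150 - 128)) == 234
        assert int(128 + factor_170 * (128 - 128)) == 128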
| 499 | 1 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class a__ ( _snake_case ):
"""simple docstring"""
A__ : torch.FloatTensor
class a__ ( _snake_case , _snake_case ):
"""simple docstring"""
@register_to_config
def __init__( self :Tuple , lowercase__ :int = 16 , lowercase__ :int = 88 , lowercase__ :Optional[int] = None , lowercase__ :Optional[int] = None , lowercase__ :int = 1 , lowercase__ :float = 0.0 , lowercase__ :int = 32 , lowercase__ :Optional[int] = None , lowercase__ :bool = False , lowercase__ :Optional[int] = None , lowercase__ :str = "geglu" , lowercase__ :bool = True , lowercase__ :bool = True , ):
super().__init__()
lowercase = num_attention_heads
lowercase = attention_head_dim
lowercase = num_attention_heads * attention_head_dim
lowercase = in_channels
lowercase = torch.nn.GroupNorm(num_groups=lowercase__ , num_channels=lowercase__ , eps=1E-6 , affine=lowercase__ )
lowercase = nn.Linear(lowercase__ , lowercase__ )
# 3. Define transformers blocks
lowercase = nn.ModuleList(
[
BasicTransformerBlock(
lowercase__ , lowercase__ , lowercase__ , dropout=lowercase__ , cross_attention_dim=lowercase__ , activation_fn=lowercase__ , attention_bias=lowercase__ , double_self_attention=lowercase__ , norm_elementwise_affine=lowercase__ , )
for d in range(lowercase__ )
] )
lowercase = nn.Linear(lowercase__ , lowercase__ )
def __UpperCAmelCase ( self :Union[str, Any] , lowercase__ :Union[str, Any] , lowercase__ :Any=None , lowercase__ :Union[str, Any]=None , lowercase__ :int=None , lowercase__ :Optional[Any]=1 , lowercase__ :Dict=None , lowercase__ :bool = True , ):
lowercase , lowercase , lowercase , lowercase = hidden_states.shape
lowercase = batch_frames // num_frames
lowercase = hidden_states
lowercase = hidden_states[None, :].reshape(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowercase = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
lowercase = self.norm(lowercase__ )
lowercase = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , lowercase__ , lowercase__ )
lowercase = self.proj_in(lowercase__ )
# 2. Blocks
for block in self.transformer_blocks:
lowercase = block(
lowercase__ , encoder_hidden_states=lowercase__ , timestep=lowercase__ , cross_attention_kwargs=lowercase__ , class_labels=lowercase__ , )
# 3. Output
lowercase = self.proj_out(lowercase__ )
lowercase = (
hidden_states[None, None, :]
.reshape(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
lowercase = hidden_states.reshape(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowercase = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=lowercase__ )
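# A shape-only sketch of the reshaping done in forward above (editorial addition):
# frames are folded out of the batch so that attention runs over the time axis,
# one sequence per spatial location.
def _example_shapes() -> None:
    batch_size, num_frames, channel, height, width = 2, 8, 4, 16, 16
    hidden_states = torch.randn(batch_size * num_frames, channel, height, width)

    x = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
    x = x.permute(0, 2, 1, 3, 4)  # (batch, channel, frames, h, w)
    x = x.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
    # every spatial position is now a length-`num_frames` sequence for attention
    assert x.shape == (batch_size * height * width, num_frames, channel)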
| 314 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith('module.encoder'):
            key = key.replace('module.encoder', 'glpn.encoder')
        if key.startswith('module.decoder'):
            key = key.replace('module.decoder', 'decoder.stages')
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed') + len('patch_embed')]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace('norm', 'layer_norm')
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('glpn.encoder.layer_norm') + len('glpn.encoder.layer_norm')]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace('layer_norm1', 'layer_norm_1')
        if "layer_norm2" in key:
            key = key.replace('layer_norm2', 'layer_norm_2')
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block') + len('block')]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace('attn.q', 'attention.self.query')
        if "attn.proj" in key:
            key = key.replace('attn.proj', 'attention.output.dense')
        if "attn" in key:
            key = key.replace('attn', 'attention.self')
        if "fc1" in key:
            key = key.replace('fc1', 'dense1')
        if "fc2" in key:
            key = key.replace('fc2', 'dense2')
        if "linear_pred" in key:
            key = key.replace('linear_pred', 'classifier')
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv', 'linear_fuse')
            key = key.replace('linear_fuse.bn', 'batch_norm')
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c') + len('linear_c')]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace('bot_conv', '0.convolution')
        if "skip_conv1" in key:
            key = key.replace('skip_conv1', '1.convolution')
        if "skip_conv2" in key:
            key = key.replace('skip_conv2', '2.convolution')
        if "fusion1" in key:
            key = key.replace('fusion1', '1.fusion')
        if "fusion2" in key:
            key = key.replace('fusion2', '2.fusion')
        if "fusion3" in key:
            key = key.replace('fusion3', '3.fusion')
        if "fusion" in key and "conv" in key:
            key = key.replace('conv', 'convolutional_layer')
        if key.startswith('module.last_layer_depth'):
            key = key.replace('module.last_layer_depth', 'head.head')
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    # define GLPN configuration
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values

    logger.info('Converting model...')

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print('Looks ok!')

    # finally, push to hub if required
    if push_to_hub:
        logger.info('Pushing model and image processor to the hub...')

        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''',
default=None,
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
parser.add_argument(
'''--model_name''',
default='''glpn-kitti''',
type=str,
help='''Name of the model in case you\'re pushing to the hub.''',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
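# A tiny illustration (editorial addition) of what rename_keys does to one entry:
# "module.encoder.block1.0.attn.q.weight" becomes
# "glpn.encoder.block.0.0.attention.self.query.weight".
def _example_rename() -> None:
    renamed = rename_keys({"module.encoder.block1.0.attn.q.weight": None})
    assert list(renamed) == ["glpn.encoder.block.0.0.attention.self.query.weight"]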
| 314 | 1 |
"""simple docstring"""
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split('.') if i.isdigit()]
    # a valid IPv4 octet is in the range 0-255
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
print(f'{ip} is a {valid_or_invalid} IP v4 address.') | 498 |
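# A few spot checks for the validator above (editorial addition):
assert is_ip_v4_address_valid("192.168.0.23") is True
assert is_ip_v4_address_valid("192.256.15.8") is False  # 256 is out of range
assert is_ip_v4_address_valid("172.100.0.8") is True
assert is_ip_v4_address_valid("255.256.25") is False  # only three octets
assert is_ip_v4_address_valid("1.2.33333333.4") is False  # out of range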
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class _SCREAMING_SNAKE_CASE:
SCREAMING_SNAKE_CASE_ : float
SCREAMING_SNAKE_CASE_ : TreeNode | None = None
SCREAMING_SNAKE_CASE_ : TreeNode | None = None
def __lowerCamelCase ( a_ : TreeNode | None ) -> bool:
# Validation
def is_valid_tree(a_ : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(a_ , a_ ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(a_ ):
raise ValueError(
'''Each node should be type of TreeNode and data should be float.''' )
def is_binary_search_tree_recursive_check(
a_ : TreeNode | None , a_ : float , a_ : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , a_ , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , a_ )
)
return is_binary_search_tree_recursive_check(a_ , -float('''inf''' ) , float('''inf''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 498 | 1 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
processor = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny", "roberta", checkpoint_path, precision="fp32", device="cuda:0" if torch.cuda.is_available() else "cpu", enable_fusion=enable_fusion, fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = R".*sequential.(\d+).*"
    text_projection_pattern = R".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)

            key = key.replace(f'sequential.{sequential_layer}.', f'layers.{int(sequential_layer)//3}.linear.')
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))

            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2

            key = key.replace(f'_projection.{projecton_layer}.', f'_projection.linear{transformers_projection_layer}.')

        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
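# A minimal demonstration (editorial addition) of the qkv split performed in
# rename_state_dict: a fused (3*d, ...) projection is cut into three equal chunks.
def _example_qkv_split() -> None:
    d = 4
    mixed_qkv = torch.arange(3 * d * 2, dtype=torch.float32).reshape(3 * d, 2)
    qkv_dim = mixed_qkv.size(0) // 3
    query = mixed_qkv[:qkv_dim]
    key = mixed_qkv[qkv_dim : qkv_dim * 2]
    value = mixed_qkv[qkv_dim * 2 :]
    assert query.shape == key.shape == value.shape == (d, 2)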
| 721 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012, beta_schedule: str = "linear", trained_betas=None, prediction_type: str = "epsilon", use_karras_sigmas: bool = False, clip_sample: bool = False, clip_sample_range: float = 1.0, timestep_spacing: str = "linspace", steps_offset: int = 0):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample: torch.FloatTensor, timestep) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps: int, device=None, num_train_timesteps: int = None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(self, model_output, timestep, sample, return_dict: bool = True):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self):
return self.config.num_train_timesteps
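# A standalone sketch (editorial addition) of the Karras et al. (2022) sigma
# schedule computed by _convert_to_karras: interpolate in sigma**(1/rho) space.
def _example_karras_sigmas(sigma_min: float = 0.1, sigma_max: float = 10.0, steps: int = 5) -> np.ndarray:
    rho = 7.0  # value used in the paper
    ramp = np.linspace(0, 1, steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
    # monotonically decreasing from sigma_max to sigma_min
    assert np.isclose(sigmas[0], sigma_max) and np.isclose(sigmas[-1], sigma_min)
    return sigmas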
| 214 | 0 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class _UpperCAmelCase( unittest.TestCase ):
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/'''))
_UpperCamelCase = self.diffusers_dir
shutil.copy(
os.path.join(__a , '''src/diffusers/schedulers/scheduling_ddpm.py''') , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''') , )
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None) -> str:
        '''simple docstring'''
        code = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            expected = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_19)
        code = black.format_str(code , mode=mode)
        fname = os.path.join(self.diffusers_dir , '''new_code.py''')
        with open(fname , '''w''' , newline='''\n''') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True)
            with open(fname , '''r''') as f:
                self.assertTrue(f.read() , expected)
    def test_find_code_in_diffusers( self) -> List[Any]:
        '''simple docstring'''
        code = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''')
        self.assertEqual(code , REFERENCE_CODE)
    def test_is_copy_consistent( self) -> Dict:
        '''simple docstring'''
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , REFERENCE_CODE) , )
        # Copy consistency with a really long name
        long_class_name = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub('''Bert''' , long_class_name , REFERENCE_CODE) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , REFERENCE_CODE , overwrite_result=re.sub('''DDPM''' , '''Test''' , REFERENCE_CODE) , )
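# --- Illustrative sketch (not part of the test above) ---
# What `is_copy_consistent` enforces: a class marked as a copy must match its
# source after the declared renames. The marker below is a hypothetical example.
#
#   # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test
#   class TestSchedulerOutput(BaseOutput):
#       ...  # body must equal DDPMSchedulerOutput's body with "DDPM" -> "Test"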
| 19 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , projection_dim=0 , ) -> Any:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs( self) -> List[Any]:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        config = DPRConfig(projection_dim=self.projection_dim , **config.to_dict())
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels) -> Optional[int]:
        '''simple docstring'''
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids)
        result = model(input_ids , token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
    def create_and_check_dpr_question_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels) -> Union[str, Any]:
        '''simple docstring'''
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids)
        result = model(input_ids , token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
    def create_and_check_dpr_reader( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels) -> Dict:
        '''simple docstring'''
        model = TFDPRReader(config=config)
        result = model(input_ids , attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,))
    def prepare_config_and_inputs_for_common( self) -> Any:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self) -> Tuple:
        '''simple docstring'''
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self , config_class=DPRConfig , hidden_size=37)
    def test_config( self) -> Optional[int]:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_dpr_context_encoder_model( self) -> Tuple:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)
    def test_dpr_question_encoder_model( self) -> Union[str, Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)
    def test_dpr_reader_model( self) -> Dict:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
@slow
    def test_model_from_pretrained( self) -> str:
        '''simple docstring'''
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest( unittest.TestCase ):
@slow
    def test_inference_no_head( self) -> Tuple:
        '''simple docstring'''
        model = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''')
        input_ids = tf.constant(
            [[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]]) # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0] # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
])
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4))
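# --- Illustrative sketch (not part of the test above) ---
# The "expected slice" pattern pins a few output values at a loose tolerance
# so the test survives nondeterministic kernels. A minimal, framework-agnostic
# version (all values and names here are made up):
import numpy as np

def check_slice(output: np.ndarray, expected: np.ndarray, atol: float = 1e-4) -> bool:
    # Compare only a small, stable window of the output tensor.
    return np.allclose(output[:, : expected.shape[1]], expected, atol=atol)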
| 19 | 1 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = """Hello, World!"""
SAMPLE_LANGUAGE = """en_XX"""
def convert_xmod_checkpoint_to_pytorch( xmod_checkpoint_path: str , pytorch_dump_folder_path: str , classification_head: bool ) -> None:
    '''simple docstring'''
    data_dir = Path("data_bin" )
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path ).parent ) , checkpoint_file=Path(xmod_checkpoint_path ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(data_dir ) , bpe="sentencepiece" , sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
    xmod.eval() # disable dropout
    print(xmod )
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:" , config )
    model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
model.eval()
# Now let's copy all the weights.
# Embeddings
A__ = xmod_sent_encoder.embed_tokens.weight
A__ = xmod_sent_encoder.embed_positions.weight
A__ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
A__ = xmod_sent_encoder.layernorm_embedding.weight
A__ = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]
        # self attention
        self_attn = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
A__ = xmod_layer.self_attn.q_proj.weight
A__ = xmod_layer.self_attn.q_proj.bias
A__ = xmod_layer.self_attn.k_proj.weight
A__ = xmod_layer.self_attn.k_proj.bias
A__ = xmod_layer.self_attn.v_proj.weight
A__ = xmod_layer.self_attn.v_proj.bias
# self-attention output
        self_output = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
A__ = xmod_layer.self_attn.out_proj.weight
A__ = xmod_layer.self_attn.out_proj.bias
A__ = xmod_layer.self_attn_layer_norm.weight
A__ = xmod_layer.self_attn_layer_norm.bias
# intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match." )
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
# output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match." )
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
A__ = xmod_sent_encoder.layer_norm.weight
A__ = xmod_sent_encoder.layer_norm.bias
if classification_head:
A__ = xmod.model.classification_heads["mnli"].dense.weight
A__ = xmod.model.classification_heads["mnli"].dense.bias
A__ = xmod.model.classification_heads["mnli"].out_proj.weight
A__ = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
A__ = xmod.model.encoder.lm_head.dense.weight
A__ = xmod.model.encoder.lm_head.dense.bias
A__ = xmod.model.encoder.lm_head.layer_norm.weight
A__ = xmod.model.encoder.lm_head.layer_norm.bias
A__ = xmod.model.encoder.lm_head.weight
A__ = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 ) # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE )
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids ) )
    else:
        their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
    print("Do both models output the same tensors?" , "🔥" if success else "💩" )
    if not success:
        raise Exception("Something went wRoNg" )
    Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(F'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
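# --- Illustrative usage sketch (paths below are hypothetical) ---
# python convert_xmod_checkpoint.py \
#     --xmod_checkpoint_path /path/to/xmod_base/model.pt \
#     --pytorch_dump_folder_path ./xmod-base-hf \
#     --classification_head   # only if the fairseq checkpoint has an MNLI head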
| 626 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class CLIPFeatureExtractor( CLIPImageProcessor ):
"""simple docstring"""
def __init__( self , *lowercase , **lowercase ) -> None:
'''simple docstring'''
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead." , FutureWarning , )
super().__init__(*lowercase , **lowercase )
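# --- Illustrative sketch (hypothetical names, not transformers code) ---
# The generic shape of the deprecation shim above: keep the old name
# importable, warn on construction, and delegate everything to the
# replacement class.
import warnings

class NewImageProcessor:  # hypothetical replacement class
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):  # hypothetical deprecated alias
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)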
| 626 | 1 |
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
"""simple docstring"""
def __init__( self : Union[str, Any] , __A : Any , __A : Union[str, Any]=1_3 , __A : int=7 , __A : Optional[int]=True , __A : Union[str, Any]=True , __A : Any=9_9 , __A : List[str]=3_2 , __A : Dict=5 , __A : List[str]=4 , __A : List[Any]=3_7 , __A : Any="gelu" , __A : Any=0.1 , __A : Tuple=0.1 , __A : Optional[int]=5_0 , __A : Union[str, Any]=0.0_2 , __A : Optional[Any]=True , __A : Dict=None , ):
"""simple docstring"""
_lowercase = parent
_lowercase = batch_size
_lowercase = seq_length
_lowercase = is_training
_lowercase = use_input_mask
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = initializer_range
_lowercase = use_labels
_lowercase = scope
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config( self ):
        """simple docstring"""
        return BertGenerationConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ):
        """simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , input_mask , token_labels , **kwargs , ):
        """simple docstring"""
        model = BertGenerationEncoder(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs , ):
        """simple docstring"""
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs , ):
        """simple docstring"""
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config ).to(torch_device ).eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )["hidden_states"][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
    def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels , *args , ):
        """simple docstring"""
        model = BertGenerationDecoder(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp( self ):
        """simple docstring"""
        self.model_tester = BertGenerationEncoderTester(self )
        self.config_tester = ConfigTester(self , config_class=BertGenerationConfig , hidden_size=3_7 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_bert( self ):
        """simple docstring"""
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config , input_ids , input_mask , token_labels )
    def test_model_as_decoder( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_decoder_model_past_with_large_inputs( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_model_as_decoder_with_default_input_mask( self ):
        """simple docstring"""
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , )
    def test_for_causal_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
        self.assertIsNotNone(model )
@require_torch
class BertGenerationEncoderIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_no_head_absolute_embedding( self ):
        """simple docstring"""
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
        input_ids = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 1_0_2_4] )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@require_torch
class BertGenerationDecoderIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_no_head_absolute_embedding( self ):
        """simple docstring"""
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
        input_ids = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 5_0_3_5_8] )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
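# --- Illustrative sketch (not part of the tests above) ---
# The past_key_values test above checks that decoding with a KV cache matches
# a full forward pass. The same idea, reduced to tensors (shapes illustrative):
import torch

def kv_cache_equivalent(full_logits: torch.Tensor, cached_logits: torch.Tensor) -> bool:
    # Compare the last steps produced with and without the cache at a loose
    # tolerance, mirroring the random-slice comparison in the test.
    return torch.allclose(full_logits[:, -3:], cached_logits, atol=1e-3)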
| 497 |
'''simple docstring'''
from typing import Any
def mode( input_list: list ) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value ) for value in input_list]
    y = max(result ) # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
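# --- Illustrative usage (assuming the `mode` name above) ---
# mode([2, 2, 3])       -> [2]
# mode([1, 2, 2, 3, 3]) -> [2, 3]   (multimodal input returns every mode)
# mode([])              -> []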
| 497 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp( self ):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
@property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
        self.assertTrue(hasattr(image_processing , """apply_ocr""" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
# Test batched
__a = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__a = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__a = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
    def test_layoutlmv3_integration_test( self ):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
        image = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
        encoding = image_processing(image , return_tensors="""pt""" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__a = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
__a = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 
643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __A )
self.assertListEqual(encoding.boxes , __A )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 209 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE = {
'facebook/mask2former-swin-small-coco-instance': (
'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """mask2former"""
    backbones_supported = ["""swin"""]
    attribute_map = {"""hidden_size""": """hidden_dim"""}
    def __init__( self , backbone_config = None , feature_size = 256 , mask_feature_size = 256 , hidden_dim = 256 , encoder_feedforward_dim = 1024 , activation_function = "relu" , encoder_layers = 6 , decoder_layers = 10 , num_attention_heads = 8 , dropout = 0.0 , dim_feedforward = 2048 , pre_norm = False , enforce_input_projection = False , common_stride = 4 , ignore_value = 255 , num_queries = 100 , no_object_weight = 0.1 , class_weight = 2.0 , mask_weight = 5.0 , dice_weight = 5.0 , train_num_points = 12544 , oversample_ratio = 3.0 , importance_sample_ratio = 0.75 , init_std = 0.02 , init_xavier_std = 1.0 , use_auxiliary_loss = True , feature_strides = [4, 8, 16, 32] , output_auxiliary_logits = None , **kwargs , ):
        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
            backbone_config = CONFIG_MAPPING["""swin"""](
                image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop("""model_type""" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs )
@classmethod
    def from_backbone_config( cls , backbone_config , **kwargs ):
        return cls(
            backbone_config=backbone_config , **kwargs , )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
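# --- Illustrative usage sketch (assuming transformers exposes this config) ---
# config = Mask2FormerConfig()                  # falls back to the default Swin backbone
# print(config.backbone_config.model_type)      # -> "swin"
# from transformers import SwinConfig
# config = Mask2FormerConfig.from_backbone_config(SwinConfig())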
| 209 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
SCREAMING_SNAKE_CASE : List[Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , a__ , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__=None , a__=None , a__=None , a__ = None , a__=None , **a__ , ) -> Optional[int]:
'''simple docstring'''
A_ = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
A_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , tokenizer_file=__lowercase , src_lang=__lowercase , tgt_lang=__lowercase , additional_special_tokens=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , )
A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowercase ) )
A_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
A_ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
A_ = 1
A_ = len(self.sp_model )
A_ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__lowercase )
}
A_ = {v: k for k, v in self.lang_code_to_id.items()}
A_ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
A_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
A_ = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
A_ = src_lang if src_lang is not None else '''en_XX'''
A_ = self.lang_code_to_id[self._src_lang]
A_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
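    # Worked example of the offset arithmetic above (token ids are
    # illustrative, assuming the standard mBART sentencepiece model):
    #   spm id of "," is 3, so its fairseq-aligned id is 3 + fairseq_offset = 4.
    #   A language code such as "en_XX" lives after the spm vocab, at
    #   sp_model_size + index("en_XX") + fairseq_offset.
    #   "<mask>" is placed last: len(sp_model) + len(lang_code_to_id) + fairseq_offset.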
    def __getstate__( self ) -> Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ) -> None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
    @property
    def src_lang( self ) -> str:
        '''simple docstring'''
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ) -> BatchEncoding:
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['''forced_bos_token_id'''] = tgt_lang_id
        return inputs
    def get_vocab( self ) -> Dict:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ) -> str:
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ) -> str:
        '''simple docstring'''
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
def lowerCAmelCase_ ( self , a__ , a__ = "en_XX" , a__ = None , a__ = "ro_RO" , **a__ , ) -> Union[str, Any]:
'''simple docstring'''
A_ = src_lang
A_ = tgt_lang
return super().prepare_seqaseq_batch(__lowercase , __lowercase , **__lowercase )
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCAmelCase_ ( self , a__ ) -> Optional[int]:
'''simple docstring'''
A_ = self.lang_code_to_id[src_lang]
A_ = []
A_ = [self.eos_token_id, self.cur_lang_code]
def lowerCAmelCase_ ( self , a__ ) -> List[str]:
'''simple docstring'''
A_ = self.lang_code_to_id[lang]
A_ = []
A_ = [self.eos_token_id, self.cur_lang_code] | 141 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever( RagRetriever ):
    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , index=None ):
        '''simple docstring'''
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.process_group = None
    def init_retrieval( self , distributed_port ):
'''simple docstring'''
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1 )
            self.process_group = dist.new_group(ranks=None , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
    def _is_main( self ):
        '''simple docstring'''
        return dist.get_rank(group=self.process_group ) == 0
    def _scattered( self , scatter_as_list , target_shape , target_type=torch.float32 ):
        '''simple docstring'''
        target_tensor = torch.empty(target_shape , dtype=target_type )
        dist.scatter(target_tensor , src=0 , scatter_list=scatter_as_list , group=self.process_group )
        return target_tensor
    def _infer_socket_ifname( self ):
        '''simple docstring'''
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("""e""" )) , None )
        return ifname
    def retrieve( self , question_hidden_states: np.ndarray , n_docs: int ):
        '''simple docstring'''
        # single GPU training
        if not dist.is_initialized():
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
        # distributed training
        world_size = dist.get_world_size(group=self.process_group )
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.float32 ) for _ in range(world_size )]
        dist.gather(torch.tensor(question_hidden_states ) , dst=0 , gather_list=gather_list , group=self.process_group )
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list ) == world_size
            ids , vectors = self._main_retrieve(torch.cat(gather_list ).numpy() , n_docs )
            ids , vectors = torch.tensor(ids ), torch.tensor(vectors )
            scatter_ids = self._chunk_tensor(ids , n_queries )
            scatter_vectors = self._chunk_tensor(vectors , n_queries )
        doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.int64 )
        retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids )
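# --- Illustrative sketch (not part of the retriever above) ---
# The gather/scatter pattern above, reduced to its core: rank 0 gathers all
# inputs, does the work once, and scatters per-rank chunks back.
import torch
import torch.distributed as dist

def gather_compute_scatter(local_batch: torch.Tensor, group=None) -> torch.Tensor:
    world_size = dist.get_world_size(group=group)
    gather_list = None
    if dist.get_rank(group=group) == 0:
        gather_list = [torch.empty_like(local_batch) for _ in range(world_size)]
    dist.gather(local_batch, dst=0, gather_list=gather_list, group=group)
    # rank 0 would compute on torch.cat(gather_list) here, then chunk results
    out = torch.empty_like(local_batch)
    scatter_list = list(torch.cat(gather_list).chunk(world_size)) if gather_list else None
    dist.scatter(out, src=0, scatter_list=scatter_list, group=group)
    return out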
| 225 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader( ABC ):
    def __init__( self , path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None , split: Optional[NamedSplit] = None , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , num_proc: Optional[int] = None , **kwargs , ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths , dict ) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
    @abstractmethod
    def read( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass
class AbstractDatasetInputStream( ABC ):
    def __init__( self , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , num_proc: Optional[int] = None , **kwargs , ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
    @abstractmethod
    def read( self ) -> Union[Dataset, IterableDataset]:
        pass
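# --- Illustrative sketch (hypothetical subclass, not a real datasets reader) ---
# A concrete reader only has to forward its options to the base __init__ and
# implement read():
from datasets import Dataset

class InMemoryDictReader(AbstractDatasetInputStream):
    def __init__(self, data: dict, **kwargs):
        super().__init__(**kwargs)
        self.data = data

    def read(self):
        return Dataset.from_dict(self.data)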
| 718 |
"""simple docstring"""
from math import isqrt
def is_prime( number: int ):
    '''simple docstring'''
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )
def solution( max_prime: int = 10**6 ):
    '''simple docstring'''
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(f'{solution() = }')
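# --- Why the candidate step works (illustrative check) ---
# The candidates are differences of consecutive cubes:
# (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1, and successive differences of that
# expression grow by 6, hence `prime_candidate += 6 * cube_index` above.
def _cuban_candidates_match(limit: int = 5) -> bool:
    direct = [(n + 1) ** 3 - n ** 3 for n in range(1, limit + 1)]  # 7, 19, 37, 61, 91
    stepped, candidate = [], 7
    for cube_index in range(1, limit + 1):
        stepped.append(candidate)
        candidate += 6 * (cube_index + 1)
    return direct == stepped  # True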
| 401 | 0 |
from __future__ import annotations
def decrypt_caesar_with_chi_squared( ciphertext: str, cipher_alphabet: list[str] | None = None, frequencies_dict: dict[str, float] | None = None, case_sensitive: bool = False, ) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i ) for i in range(97, 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
'a': 0.0_8497,
'b': 0.0_1492,
'c': 0.0_2202,
'd': 0.0_4253,
'e': 0.1_1162,
'f': 0.0_2228,
'g': 0.0_2015,
'h': 0.0_6094,
'i': 0.0_7546,
'j': 0.0_0153,
'k': 0.0_1292,
'l': 0.0_4025,
'm': 0.0_2406,
'n': 0.0_6749,
'o': 0.0_7507,
'p': 0.0_1929,
'q': 0.0_0095,
'r': 0.0_7587,
's': 0.0_6327,
't': 0.0_9356,
'u': 0.0_2758,
'v': 0.0_0978,
'w': 0.0_2560,
'x': 0.0_0150,
'y': 0.0_1994,
'z': 0.0_0077,
}
else:
# Custom frequencies dictionary
__lowerCAmelCase = frequencies_dict
if not case_sensitive:
__lowerCAmelCase = ciphertext.lower()
# Chi squared statistic values
__lowerCAmelCase = {}
# cycle through all of the shifts
for shift in range(len(lowerCAmelCase_ ) ):
__lowerCAmelCase = ''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
__lowerCAmelCase = (alphabet_letters.index(letter.lower() ) - shift) % len(
lowerCAmelCase_ )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
__lowerCAmelCase = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
__lowerCAmelCase = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
__lowerCAmelCase = decrypted_with_shift.lower().count(lowerCAmelCase_ )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
__lowerCAmelCase = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
__lowerCAmelCase = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
__lowerCAmelCase = decrypted_with_shift.count(lowerCAmelCase_ )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
__lowerCAmelCase = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
__lowerCAmelCase = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
__lowerCAmelCase = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(lowerCAmelCase_ : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
__lowerCAmelCase = min(
lowerCAmelCase_, key=lowerCAmelCase_, )
# Get all the data from the most likely cipher (key, decoded message)
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
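A round-trip check of the decrypter above (on very short messages the chi-squared statistic can occasionally prefer a different shift, so the expected output is illustrative):

alphabet = [chr(code) for code in range(97, 123)]
plain = "the quick brown fox jumps over the lazy dog"
cipher = "".join(
    alphabet[(alphabet.index(ch) + 3) % 26] if ch in alphabet else ch for ch in plain
)
shift, chi_squared, decoded = decrypt_caesar_with_chi_squared(cipher)
print(shift, decoded)  # expected: 3 the quick brown fox jumps over the lazy dog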
| 53 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    '''simple docstring'''
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
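A quick usage check of the function itself (the returned factor can be either prime divisor, and None is possible for prime or unlucky inputs):

factor = pollard_rho(91)  # 91 = 7 * 13; the default seed finds 7 on the first cycle
assert factor in (7, 13)
print(factor, 91 // factor)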
| 499 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=[1, 16, 4, 4] , SCREAMING_SNAKE_CASE_=None , ) -> str:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = scope
lowerCamelCase_ = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
lowerCamelCase_ = (self.image_size // 32) ** 2
lowerCamelCase_ = num_patches + 1
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=SCREAMING_SNAKE_CASE_ , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = ViTHybridModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = ViTHybridForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = ViTHybridModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = _config_zero_init(SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(config=SCREAMING_SNAKE_CASE_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowerCamelCase_ = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = ViTHybridModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( ) -> str:
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
lowerCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor([-1.9_090, -0.4_993, -0.2_389] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
@require_accelerate
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
lowerCamelCase_ = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = model(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = outputs.logits
# model predicts one of the 1000 ImageNet classes
lowerCamelCase_ = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], 'tabby, tabby cat')
| 384 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'bert-generation'
def __init__( self , SCREAMING_SNAKE_CASE_=50358 , SCREAMING_SNAKE_CASE_=1024 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=4096 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = hidden_act
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = position_embedding_type
lowerCamelCase_ = use_cache
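Assuming the upstream class name BertGenerationConfig for the config class above, instantiation follows the usual PretrainedConfig pattern (the hyperparameter values here are arbitrary):

from transformers import BertGenerationConfig

config = BertGenerationConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
print(config.model_type)         # "bert-generation"
print(config.num_hidden_layers)  # 4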
| 384 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
lowerCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
"""simple docstring"""
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
snake_case_ : Union[str, Any] = model_type_to_module_name(_UpperCamelCase )
snake_case_ : Dict = importlib.import_module(f'''.{module_name}''' , '''transformers.models''' )
try:
return getattr(_UpperCamelCase , _UpperCamelCase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(_UpperCamelCase , '''__name__''' , _UpperCamelCase ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
snake_case_ : Union[str, Any] = importlib.import_module('''transformers''' )
if hasattr(_UpperCamelCase , _UpperCamelCase ):
return getattr(_UpperCamelCase , _UpperCamelCase )
return None
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , **_UpperCamelCase , ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = get_file_from_repo(
_UpperCamelCase , _UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , resume_download=_UpperCamelCase , proxies=_UpperCamelCase , use_auth_token=_UpperCamelCase , revision=_UpperCamelCase , local_files_only=_UpperCamelCase , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(_UpperCamelCase , encoding='''utf-8''' ) as reader:
return json.load(_UpperCamelCase )
class __lowerCAmelCase :
def __init__(self ) -> Optional[Any]:
'''simple docstring'''
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(__magic_name__ )
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = kwargs.pop('''config''' , __magic_name__ )
snake_case_ : Union[str, Any] = kwargs.pop('''trust_remote_code''' , __magic_name__ )
snake_case_ : Tuple = True
snake_case_ , snake_case_ : Tuple = FeatureExtractionMixin.get_feature_extractor_dict(__magic_name__ , **__magic_name__ )
snake_case_ : int = config_dict.get('''feature_extractor_type''' , __magic_name__ )
snake_case_ : str = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
snake_case_ : Any = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(__magic_name__ , __magic_name__ ):
snake_case_ : Any = AutoConfig.from_pretrained(__magic_name__ , **__magic_name__ )
# It could be in `config.feature_extractor_type``
snake_case_ : List[Any] = getattr(__magic_name__ , '''feature_extractor_type''' , __magic_name__ )
if hasattr(__magic_name__ , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
snake_case_ : List[str] = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
snake_case_ : List[Any] = feature_extractor_class_from_name(__magic_name__ )
snake_case_ : Optional[int] = feature_extractor_auto_map is not None
snake_case_ : Tuple = feature_extractor_class is not None or type(__magic_name__ ) in FEATURE_EXTRACTOR_MAPPING
snake_case_ : Optional[int] = resolve_trust_remote_code(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if has_remote_code and trust_remote_code:
snake_case_ : int = get_class_from_dynamic_module(
__magic_name__ , __magic_name__ , **__magic_name__ )
snake_case_ : int = kwargs.pop('''code_revision''' , __magic_name__ )
if os.path.isdir(__magic_name__ ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(__magic_name__ , **__magic_name__ )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(__magic_name__ , **__magic_name__ )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(__magic_name__ ) in FEATURE_EXTRACTOR_MAPPING:
snake_case_ : Optional[Any] = FEATURE_EXTRACTOR_MAPPING[type(__magic_name__ )]
return feature_extractor_class.from_dict(__magic_name__ , **__magic_name__ )
raise ValueError(
F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def lowerCamelCase (__magic_name__ , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
FEATURE_EXTRACTOR_MAPPING.register(__magic_name__ , __magic_name__ )
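Typical usage of the auto class resolves the concrete extractor from the checkpoint's stored feature_extractor_type; a minimal sketch (the checkpoint name is just an example):

from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(extractor).__name__)  # Wav2Vec2FeatureExtractor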
| 60 |
'''simple docstring'''
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """simple docstring"""

    def __init__(self, data: bytes) -> None:
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n: int, b: int) -> int:
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self) -> bytes:
        padding = b'''\x80''' + b'''\x00''' * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack('''>Q''', 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block: bytes):
        w = list(struct.unpack('''>16L''', block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self) -> str:
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash() -> None:
    msg = b'''Test String'''
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main() -> None:
    parser = argparse.ArgumentParser(description='''Process some strings or files''')
    parser.add_argument(
        '''--string''', dest='''input_string''', default='''Hello World!! Welcome to Cryptography''', help='''Hash the string''', )
    parser.add_argument('''--file''', dest='''input_file''', help='''Hash contents of a file''')
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, '''rb''') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, '''utf-8''')
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
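A quick external check that the pure-Python class matches hashlib on an arbitrary message:

import hashlib

message = b"The quick brown fox jumps over the lazy dog"
assert SHA1Hash(message).final_hash() == hashlib.sha1(message).hexdigest()  # noqa: S324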
| 127 | 0 |
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
UpperCAmelCase_ : Union[str, Any] = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase_ : str = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
UpperCAmelCase_ : Optional[int] = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
UpperCAmelCase_ : Tuple = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
UpperCAmelCase_ : Any = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
UpperCAmelCase_ : Any = [
('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
(
'''zero-shot-object-detection''',
'''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
'''AutoModelForZeroShotObjectDetection''',
),
('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
(
'''table-question-answering''',
'''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForTableQuestionAnswering''',
),
('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
(
'''next-sentence-prediction''',
'''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
'''AutoModelForNextSentencePrediction''',
),
(
'''audio-frame-classification''',
'''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForAudioFrameClassification''',
),
('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
(
'''document-question-answering''',
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForDocumentQuestionAnswering''',
),
(
'''visual-question-answering''',
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForVisualQuestionAnswering''',
),
('''image-to-text''', '''MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
(
'''zero-shot-image-classification''',
'''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForZeroShotImageClassification''',
),
('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]
def camel_case_split(identifier: str) -> list:
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _lowerCAmelCase() -> Any:
_SCREAMING_SNAKE_CASE =transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_SCREAMING_SNAKE_CASE ={
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
_SCREAMING_SNAKE_CASE =collections.defaultdict(a )
_SCREAMING_SNAKE_CASE =collections.defaultdict(a )
_SCREAMING_SNAKE_CASE =collections.defaultdict(a )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(a ):
_SCREAMING_SNAKE_CASE =None
if _re_tf_models.match(a ) is not None:
_SCREAMING_SNAKE_CASE =tf_models
_SCREAMING_SNAKE_CASE =_re_tf_models.match(a ).groups()[0]
elif _re_flax_models.match(a ) is not None:
_SCREAMING_SNAKE_CASE =flax_models
_SCREAMING_SNAKE_CASE =_re_flax_models.match(a ).groups()[0]
elif _re_pt_models.match(a ) is not None:
_SCREAMING_SNAKE_CASE =pt_models
_SCREAMING_SNAKE_CASE =_re_pt_models.match(a ).groups()[0]
if lookup_dict is not None:
while len(a ) > 0:
if attr_name in model_prefix_to_model_type:
_SCREAMING_SNAKE_CASE =True
break
# Try again after removing the last word in the name
_SCREAMING_SNAKE_CASE =''''''.join(camel_case_split(a )[:-1] )
_SCREAMING_SNAKE_CASE =set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
_SCREAMING_SNAKE_CASE =list(a )
all_models.sort()
_SCREAMING_SNAKE_CASE ={'''model_type''': all_models}
_SCREAMING_SNAKE_CASE =[pt_models[t] for t in all_models]
_SCREAMING_SNAKE_CASE =[tf_models[t] for t in all_models]
_SCREAMING_SNAKE_CASE =[flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
_SCREAMING_SNAKE_CASE ={}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
_SCREAMING_SNAKE_CASE ='''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
_SCREAMING_SNAKE_CASE ='''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
_SCREAMING_SNAKE_CASE ='''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
_SCREAMING_SNAKE_CASE ='''AutoTokenizer'''
_SCREAMING_SNAKE_CASE =[processors[t] for t in all_models]
return pd.DataFrame(a )
def _lowerCAmelCase(a : Dict ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE =[
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
_SCREAMING_SNAKE_CASE =[model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
_SCREAMING_SNAKE_CASE =[auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(a , a , a ):
# The type of pipeline may not exist in this framework
if not hasattr(a , a ):
continue
# First extract all model_names
_SCREAMING_SNAKE_CASE =[]
for name in getattr(a , a ).values():
if isinstance(a , a ):
model_names.append(a )
else:
model_names.extend(list(a ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def _lowerCAmelCase(a : Optional[int] , a : int ) -> Tuple:
_SCREAMING_SNAKE_CASE =get_frameworks_table()
_SCREAMING_SNAKE_CASE =Dataset.from_pandas(a )
_SCREAMING_SNAKE_CASE =hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=a )
_SCREAMING_SNAKE_CASE =Dataset.from_json(a )
_SCREAMING_SNAKE_CASE ={
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(a ) )
}
_SCREAMING_SNAKE_CASE =update_pipeline_and_auto_class_table(a )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
_SCREAMING_SNAKE_CASE =sorted(table.keys() )
_SCREAMING_SNAKE_CASE =pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
_SCREAMING_SNAKE_CASE =Dataset.from_pandas(a )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(a , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(a , '''pipeline_tags.json''' ) )
if commit_sha is not None:
_SCREAMING_SNAKE_CASE =(
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
_SCREAMING_SNAKE_CASE ='''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=a , repo_type='''dataset''' , token=a , commit_message=a , )
def _lowerCAmelCase() -> int:
_SCREAMING_SNAKE_CASE ={tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
_SCREAMING_SNAKE_CASE =transformers_module.pipelines.SUPPORTED_TASKS
_SCREAMING_SNAKE_CASE =[]
for key in pipeline_tasks:
if key not in in_table:
_SCREAMING_SNAKE_CASE =pipeline_tasks[key]['''pt''']
if isinstance(a , (list, tuple) ):
_SCREAMING_SNAKE_CASE =model[0]
_SCREAMING_SNAKE_CASE =model.__name__
if model not in in_table.values():
missing.append(a )
if len(a ) > 0:
_SCREAMING_SNAKE_CASE =''', '''.join(a )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
UpperCAmelCase_ : Any = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
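With camel_case_split restored above, the trimming strategy in the frameworks-table builder is easy to see: the regex peels one word per match, and dropping the last element walks up the class-name hierarchy until a known model prefix matches.

parts = camel_case_split("BertForMaskedLM")
assert parts == ["Bert", "For", "Masked", "LM"]
# The backend-detection loop tries "BertForMaskedLM", then "BertForMasked",
# then "BertFor", then "Bert" until one matches a model prefix in the config mapping.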
| 165 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ : int = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
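The _LazyModule above defers the heavy per-framework imports until an attribute is first accessed; a stripped-down sketch of the idea (illustrative only — the real class also handles __dir__, pickling, and error reporting):

import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, name):
        # Only runs on first access; the real class turns KeyError into AttributeError
        # and requires `name` to be a proper package for the relative import to work.
        module_name = self._class_to_module[name]
        module = importlib.import_module("." + module_name, self.__name__)
        return getattr(module, name)


# lazy = TinyLazyModule("mypkg", {"configuration": ["MyConfig"]})
# lazy.MyConfig  # imports mypkg.configuration only at this point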
| 165 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self :str):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_lowercase =AutoConfig.from_pretrained(__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
_lowercase =TFAutoModel.from_pretrained(__UpperCamelCase, from_pt=__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
_lowercase =AutoModel.from_pretrained(__UpperCamelCase, from_tf=__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
@slow
def UpperCamelCase__ ( self :Tuple):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_lowercase =AutoConfig.from_pretrained(__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
_lowercase =TFAutoModelForPreTraining.from_pretrained(__UpperCamelCase, from_pt=__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
_lowercase =AutoModelForPreTraining.from_pretrained(__UpperCamelCase, from_tf=__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
@slow
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase =AutoConfig.from_pretrained(__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
_lowercase =TFAutoModelForCausalLM.from_pretrained(__UpperCamelCase, from_pt=__UpperCamelCase)
_lowercase =TFAutoModelForCausalLM.from_pretrained(
__UpperCamelCase, output_loading_info=__UpperCamelCase, from_pt=__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
_lowercase =AutoModelForCausalLM.from_pretrained(__UpperCamelCase, from_tf=__UpperCamelCase)
_lowercase =AutoModelForCausalLM.from_pretrained(
__UpperCamelCase, output_loading_info=__UpperCamelCase, from_tf=__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
@slow
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase =AutoConfig.from_pretrained(__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
_lowercase =TFAutoModelWithLMHead.from_pretrained(__UpperCamelCase, from_pt=__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
_lowercase =AutoModelWithLMHead.from_pretrained(__UpperCamelCase, from_tf=__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
@slow
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase =AutoConfig.from_pretrained(__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
_lowercase =TFAutoModelForMaskedLM.from_pretrained(__UpperCamelCase, from_pt=__UpperCamelCase)
_lowercase =TFAutoModelForMaskedLM.from_pretrained(
__UpperCamelCase, output_loading_info=__UpperCamelCase, from_pt=__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
_lowercase =AutoModelForMaskedLM.from_pretrained(__UpperCamelCase, from_tf=__UpperCamelCase)
_lowercase =AutoModelForMaskedLM.from_pretrained(
__UpperCamelCase, output_loading_info=__UpperCamelCase, from_tf=__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
@slow
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase =AutoConfig.from_pretrained(__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
_lowercase =TFAutoModelForSeqaSeqLM.from_pretrained(__UpperCamelCase, from_pt=__UpperCamelCase)
_lowercase =TFAutoModelForSeqaSeqLM.from_pretrained(
__UpperCamelCase, output_loading_info=__UpperCamelCase, from_pt=__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
_lowercase =AutoModelForSeqaSeqLM.from_pretrained(__UpperCamelCase, from_tf=__UpperCamelCase)
_lowercase =AutoModelForSeqaSeqLM.from_pretrained(
__UpperCamelCase, output_loading_info=__UpperCamelCase, from_tf=__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
@slow
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_lowercase =AutoConfig.from_pretrained(__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
_lowercase =TFAutoModelForSequenceClassification.from_pretrained(__UpperCamelCase, from_pt=__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
_lowercase =AutoModelForSequenceClassification.from_pretrained(__UpperCamelCase, from_tf=__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
@slow
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_lowercase =AutoConfig.from_pretrained(__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
_lowercase =TFAutoModelForQuestionAnswering.from_pretrained(__UpperCamelCase, from_pt=__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
_lowercase =AutoModelForQuestionAnswering.from_pretrained(__UpperCamelCase, from_tf=__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
_lowercase =TFAutoModelWithLMHead.from_pretrained(__UpperCamelCase, from_pt=__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
self.assertEqual(model.num_parameters(), 1_4410)
self.assertEqual(model.num_parameters(only_trainable=__UpperCamelCase), 1_4410)
_lowercase =AutoModelWithLMHead.from_pretrained(__UpperCamelCase, from_tf=__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
self.assertEqual(model.num_parameters(), 1_4410)
self.assertEqual(model.num_parameters(only_trainable=__UpperCamelCase), 1_4410)
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
_lowercase =TFAutoModelWithLMHead.from_pretrained(__UpperCamelCase, from_pt=__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
self.assertEqual(model.num_parameters(), 1_4410)
self.assertEqual(model.num_parameters(only_trainable=__UpperCamelCase), 1_4410)
_lowercase =AutoModelWithLMHead.from_pretrained(__UpperCamelCase, from_tf=__UpperCamelCase)
self.assertIsInstance(__UpperCamelCase, __UpperCamelCase)
self.assertEqual(model.num_parameters(), 1_4410)
self.assertEqual(model.num_parameters(only_trainable=__UpperCamelCase), 1_4410)
| 181 |
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError('''Length must be a positive integer.''')
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
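The closed form n(2n - 1) matches the partial sums of the arithmetic sequence 1, 5, 9, ... with common difference 4:

assert hexagonal_numbers(length=5) == [0, 1, 6, 15, 28]
assert all(n * (2 * n - 1) == sum(range(1, 4 * n, 4)) for n in range(1, 50))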
| 416 | 0 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(key: tuple[int, ...], ciphertext: list[int]) -> str | None:
    '''simple docstring'''
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    '''simple docstring'''
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(key, ciphertext)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    '''simple docstring'''
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    '''simple docstring'''
    data = Path(__file__).parent.joinpath(filename).read_text(encoding='''utf-8''')
    ciphertext = [int(number) for number in data.strip().split(''',''')]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F'''{solution() = }''')
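Since XOR is self-inverse, encoding with a key and feeding the result back through try_key recovers the message:

message = "the quick brown fox"
key = (ord("a"), ord("b"), ord("c"))
encoded = [ord(ch) ^ k for ch, k in zip(message, cycle(key))]
assert try_key(key, encoded) == message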
| 703 |
"""simple docstring"""
def kinetic_energy(mass: float, velocity: float) -> float:
    '''simple docstring'''
    if mass < 0:
        raise ValueError('''The mass of a body cannot be negative''')
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
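A quick numeric check: a 2 kg body moving at 3 m/s (in either direction) carries 0.5 * 2 * 3**2 = 9 J:

assert kinetic_energy(2, 3) == 9.0
assert kinetic_energy(2, -3) == 9.0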
| 168 | 0 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
UpperCamelCase =pytest.mark.integration
UpperCamelCase ={"comet"}
UpperCamelCase =importlib.util.find_spec("fairseq") is not None
UpperCamelCase ={"code_eval"}
UpperCamelCase =os.name == "nt"
UpperCamelCase ={"bertscore", "frugalscore", "perplexity"}
UpperCamelCase =importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)
    return wrapper
def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)
    return wrapper
def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)
    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    """Run the doctests of every metric in the local ./metrics directory."""
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        _ = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @slow
    def test_load_real_metric(self, metric_name):
        _ = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield
    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)
        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield
    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor
    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags
    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])
    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch
    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))
    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)
        return Model()
    # mock load_from_checkpoint which is supposed to do download a bert model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 208 |
'''simple docstring'''
def method_1(boundary: list, steps: float) -> float:
    """Composite trapezoidal rule: integrate f over [boundary[0], boundary[1]]
    using sub-intervals of width h = (b - a) / steps."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def make_points(a: float, b: float, h: float):
    """Yield the interior sample points a + h, a + 2h, ... strictly inside (a, b)."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main() -> None:
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
print(f"y = {y}" )
if __name__ == "__main__":
main()
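# Hedged sanity check of the composite trapezoidal rule above: with
# f(x) = x**2 on [0, 1] and 10 steps (h = 0.1), the estimate is roughly 0.335,
# close to the exact integral 1/3 (small differences come from how make_points
# handles the endpoint).
if __name__ == "__main__":
    print(f"estimate for x**2 on [0, 1]: {method_1([0.0, 1.0], 10.0)}")  # ~0.335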
| 208 | 1 |
'''simple docstring'''
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a date given as "mm-dd-yyyy" or
    "mm/dd/yyyy", computed with Zeller's congruence."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m: int = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_1: str = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d: int = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_2: str = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y: int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c: int = int(str(y)[:2])
    k: int = int(str(y)[2:])
    t: int = int(2.6 * m - 5.39)
    u: int = int(c / 4)
    v: int = int(k / 4)
    x: int = int(d + k)
    z: int = int(t + u + v + x)
    w: int = int(z - (2 * c))
    f: int = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response: str = f"Your date {date_input}, is a {days[str(f)]}!"
return response
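# Hedged self-check with an arbitrary date: 2010-01-31 fell on a Sunday, and
# zeller() above already cross-validates its result against datetime.
assert "Sunday" in zeller("01-31-2010")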
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
    args = parser.parse_args()
zeller(args.date_input)
| 708 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)
    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")
    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")
    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )
    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )
    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        experiments = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10, distributed=False
        )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args_str = "--skip_memory_metrics 0"
            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,
                extra_args_str=extra_args_str,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,
            )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )
        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )
        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()
        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()
        args_predict = """
            --do_predict
        """.split()
        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
main()
return output_dir
| 242 | 0 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, then yield each prompt `n_copies` times."""
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__(self):
        prompts = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` that checks whether all generated functions in the batch are completed."""
    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__(self, input_ids, scores, **kwargs):
        """Return True when every generated sequence contains one of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the trailing block that starts at the last EOF string."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
return "".join(string_list[:-2] )
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate `batch_size` completions per task and regroup them by task id."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f'check({human_eval["test"][task]["entry_point"]})'
            references.append("\n" + test_func + "\n" + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")
        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
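# Hedged usage sketch: flag names come from HumanEvalArguments (defined in the
# accompanying arguments.py); the checkpoint path below is a placeholder.
#   accelerate launch human_eval.py --model_ckpt <model> --do_sample True \
#       --n_samples 200 --batch_size 10 --HF_ALLOW_CODE_EVAL 1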
| 656 |
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False):
    """Per-sample stochastic depth: zero whole samples with probability
    `drop_prob` during training and rescale the survivors by 1 / keep_prob."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
return output
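# Hedged behavior sketch (demo values, not part of the model): during training
# each sample survives with probability keep_prob and is rescaled by
# 1 / keep_prob, so the expected activation is unchanged.
if __name__ == "__main__":
    _x = torch.ones(4, 3)
    print(drop_path(_x, drop_prob=0.5, training=True))  # rows are all 0.0 or all 2.0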
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""
    def __init__(self, drop_prob: Optional[float] = None):
        super().__init__()
        self.drop_prob = drop_prob
    def forward(self, hidden_states: torch.Tensor):
        return drop_path(hidden_states, self.drop_prob, self.training)
    def extra_repr(self):
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings with an optional normalization layer."""
    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)
        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()
    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with a single group, i.e. LayerNorm over the channel dimension."""
    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)
    def forward(self, hidden_states):
        # Subtracting the input makes this a parameter-free "token mixer": the
        # module returns only the pooling residual, and PoolFormerLayer adds
        # the input back through its skip connection.
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act
    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""
    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)
        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()
            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)
            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()
            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output
            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)
        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)
    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None
        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """An abstract class to handle weights initialization and a simple interface
    for downloading and loading pretrained models."""
    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = R"\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
POOLFORMER_INPUTS_DOCSTRING = R"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.encoder = PoolFormerEncoder(config)
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        encoder_outputs = self.encoder(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)
        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.poolformer(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        sequence_output = outputs[0]
        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 656 | 1 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
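# Hedged usage sketch: ONNX Runtime reports tensor types as strings, and this
# map recovers the matching NumPy dtype, e.g. when allocating output buffers.
assert ORT_TO_NP_TYPE["tensor(float)"] is np.float32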
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)
    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX inference session with the given execution provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)
    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
| 450 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    """Configuration class for an EnCodec audio codec model."""
    model_type = "encodec"
    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1_024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )
        super().__init__(**kwargs)
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)
    @property
    def num_quantizers(self) -> int:
return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
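# Hedged worked example with the 24 kHz defaults: prod([8, 5, 4, 2]) = 320, so
# frame_rate = ceil(24000 / 320) = 75 and num_quantizers = 24000 // 750 = 32.
if __name__ == "__main__":
    cfg = EncodecConfig()
    print(cfg.frame_rate, cfg.num_quantizers)  # 75 32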
| 450 | 1 |
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 77 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)
    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )
        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)
        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(F'''{directory}/configuration.json''' )
        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax
        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
# Tests require submodules as they have parent imports
with open(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , "w" ):
pass
shutil.move(
F'''{directory}/__init__.py''' , F'''{model_dir}/__init__.py''' , )
shutil.move(
F'''{directory}/configuration_{lowercase_model_name}.py''' , F'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_tf_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_flax_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/{lowercase_model_name}.md''' , F'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
F'''{directory}/tokenization_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(a_ , a_ , a_ ):
# Create temp file
lowerCAmelCase , lowerCAmelCase : Tuple = mkstemp()
lowerCAmelCase : int = False
with fdopen(a_ , "w" ) as new_file:
with open(a_ ) as old_file:
for line in old_file:
new_file.write(a_ )
if line_to_copy_below in line:
lowerCAmelCase : Optional[int] = True
for line_to_copy in lines_to_copy:
new_file.write(a_ )
if not line_found:
raise ValueError(F'''Line {line_to_copy_below} was not found in file.''' )
# Copy the file permissions from the old file to the new file
copymode(a_ , a_ )
# Remove original file
remove(a_ )
# Move new file
move(a_ , a_ )
def skip_units(a_ ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(a_ ):
with open(a_ ) as datafile:
lowerCAmelCase : Dict = []
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : List[Any] = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
lowerCAmelCase : Optional[int] = line.split("\"" )[1]
lowerCAmelCase : Tuple = skip_units(a_ )
elif "# Below: " in line and "##" not in line:
lowerCAmelCase : Any = line.split("\"" )[1]
lowerCAmelCase : List[str] = skip_units(a_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(a_ , a_ , a_ )
lowerCAmelCase : List[str] = []
elif "# Replace with" in line and "##" not in line:
lowerCAmelCase : Any = []
elif "##" not in line:
lines_to_copy.append(a_ )
remove(a_ )
replace_in_files(F'''{directory}/to_replace_{lowercase_model_name}.py''' )
os.rmdir(a_ )
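# A minimal, self-contained sketch of the temp-file pattern used by the
# `replace` helper above: write the edited content to a temporary file, copy
# the original permissions, then move it over the original. The function and
# file names here are illustrative, not part of the original script.
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp

def replace_text_in_file(path, old, new):
    fd, tmp_path = mkstemp()
    with fdopen(fd, "w") as new_file, open(path) as old_file:
        for line in old_file:
            new_file.write(line.replace(old, new))
    copymode(path, tmp_path)  # keep the original file permissions
    remove(path)
    move(tmp_path, path)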
| 525 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class UpperCamelCase ( unittest.TestCase ):
def __init__(self : Optional[int] , _A : Tuple , _A : int=7 , _A : Dict=3 , _A : List[str]=18 , _A : List[Any]=30 , _A : str=4_00 , _A : Any=True , _A : Optional[Any]=None , _A : Optional[int]=True , _A : List[Any]=None , _A : List[str]=True , _A : Optional[int]=[0.48_145_466, 0.4_578_275, 0.40_821_073] , _A : List[Any]=[0.26_862_954, 0.26_130_258, 0.27_577_711] , _A : List[str]=True , ) -> List[str]:
__snake_case : str = size if size is not None else {'height': 2_24, 'width': 2_24}
__snake_case : Optional[Any] = crop_size if crop_size is not None else {'height': 18, 'width': 18}
__snake_case : Optional[int] = parent
__snake_case : Optional[Any] = batch_size
__snake_case : Any = num_channels
__snake_case : Union[str, Any] = image_size
__snake_case : str = min_resolution
__snake_case : Optional[Any] = max_resolution
__snake_case : Tuple = do_resize
__snake_case : Optional[int] = size
__snake_case : int = do_center_crop
__snake_case : List[str] = crop_size
__snake_case : Tuple = do_normalize
__snake_case : Optional[int] = image_mean
__snake_case : Tuple = image_std
__snake_case : List[Any] = do_convert_rgb
def _lowercase (self : Optional[Any]) -> Optional[int]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def _lowercase (self : Optional[int] , _A : Tuple=False , _A : str=False , _A : Tuple=False) -> str:
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
__snake_case : List[str] = []
for i in range(self.batch_size):
image_inputs.append(
np.random.randint(
                2_55 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8))
else:
__snake_case : List[str] = []
for i in range(self.batch_size):
__snake_case , __snake_case : str = np.random.choice(np.arange(self.min_resolution , self.max_resolution) , 2)
                image_inputs.append(np.random.randint(2_55 , size=(self.num_channels, width, height) , dtype=np.uint8))
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
__snake_case : str = [Image.fromarray(np.moveaxis(_A , 0 , -1)) for x in image_inputs]
if torchify:
__snake_case : Optional[Any] = [torch.from_numpy(_A) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class UpperCamelCase ( lowercase , unittest.TestCase ):
UpperCAmelCase : Dict = ChineseCLIPImageProcessor if is_vision_available() else None
def _lowercase (self : Any) -> List[str]:
__snake_case : List[str] = ChineseCLIPImageProcessingTester(self , do_center_crop=_A)
@property
def _lowercase (self : Union[str, Any]) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase (self : List[Any]) -> Optional[Any]:
__snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_A , 'do_resize'))
self.assertTrue(hasattr(_A , 'size'))
self.assertTrue(hasattr(_A , 'do_center_crop'))
self.assertTrue(hasattr(_A , 'center_crop'))
self.assertTrue(hasattr(_A , 'do_normalize'))
self.assertTrue(hasattr(_A , 'image_mean'))
self.assertTrue(hasattr(_A , 'image_std'))
self.assertTrue(hasattr(_A , 'do_convert_rgb'))
def _lowercase (self : List[str]) -> Optional[int]:
__snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'height': 2_24, 'width': 2_24})
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18})
__snake_case : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {'shortest_edge': 42})
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84})
def _lowercase (self : int) -> Any:
pass
def _lowercase (self : Optional[Any]) -> Optional[Any]:
# Initialize image_processing
__snake_case : int = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__snake_case : List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=_A)
for image in image_inputs:
self.assertIsInstance(_A , Image.Image)
# Test not batched input
__snake_case : Any = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__snake_case : str = image_processing(_A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _lowercase (self : Optional[int]) -> Dict:
# Initialize image_processing
__snake_case : Tuple = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__snake_case : int = self.image_processor_tester.prepare_inputs(equal_resolution=_A , numpify=_A)
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray)
# Test not batched input
__snake_case : Dict = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__snake_case : Union[str, Any] = image_processing(_A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _lowercase (self : Tuple) -> Dict:
# Initialize image_processing
__snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__snake_case : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=_A , torchify=_A)
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor)
# Test not batched input
__snake_case : Any = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__snake_case : List[str] = image_processing(_A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
@require_torch
@require_vision
class UpperCamelCase ( lowercase , unittest.TestCase ):
UpperCAmelCase : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None
def _lowercase (self : Dict) -> Union[str, Any]:
__snake_case : List[Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=_A)
__snake_case : Optional[int] = 3
@property
def _lowercase (self : str) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase (self : Optional[Any]) -> int:
__snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_A , 'do_resize'))
self.assertTrue(hasattr(_A , 'size'))
self.assertTrue(hasattr(_A , 'do_center_crop'))
self.assertTrue(hasattr(_A , 'center_crop'))
self.assertTrue(hasattr(_A , 'do_normalize'))
self.assertTrue(hasattr(_A , 'image_mean'))
self.assertTrue(hasattr(_A , 'image_std'))
self.assertTrue(hasattr(_A , 'do_convert_rgb'))
def _lowercase (self : Dict) -> Any:
pass
def _lowercase (self : Optional[int]) -> Any:
# Initialize image_processing
__snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__snake_case : int = self.image_processor_tester.prepare_inputs(equal_resolution=_A)
for image in image_inputs:
self.assertIsInstance(_A , Image.Image)
# Test not batched input
__snake_case : str = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__snake_case : str = image_processing(_A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
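# Hedged usage sketch (not part of the tests above): roughly how the image
# processor under test is driven outside a test harness. The default
# constructor values are an assumption; a real checkpoint would be loaded
# with `from_pretrained`.
import numpy as np
from PIL import Image
from transformers import ChineseCLIPImageProcessor

processor = ChineseCLIPImageProcessor()
image = Image.fromarray(np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8))
pixel_values = processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # (1, num_channels, crop_height, crop_width)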
| 192 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a : Union[str, Any]= logging.get_logger(__name__)
def __UpperCAmelCase ( UpperCAmelCase_ : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : str = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
__snake_case : Any = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
__snake_case : int = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
__snake_case : Any = key[key.find('patch_embed' ) + len('patch_embed' )]
__snake_case : Optional[int] = key.replace(F"patch_embed{idx}" , F"patch_embeddings.{int(UpperCAmelCase_ )-1}" )
if "norm" in key:
__snake_case : List[str] = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
__snake_case : Tuple = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
__snake_case : Tuple = key.replace(F"layer_norm{idx}" , F"layer_norm.{int(UpperCAmelCase_ )-1}" )
if "layer_norm1" in key:
__snake_case : Tuple = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
__snake_case : Dict = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
__snake_case : Any = key[key.find('block' ) + len('block' )]
__snake_case : int = key.replace(F"block{idx}" , F"block.{int(UpperCAmelCase_ )-1}" )
if "attn.q" in key:
__snake_case : Any = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
__snake_case : Any = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
__snake_case : Dict = key.replace('attn' , 'attention.self' )
if "fc1" in key:
__snake_case : Optional[Any] = key.replace('fc1' , 'dense1' )
if "fc2" in key:
__snake_case : Union[str, Any] = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
__snake_case : Any = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
__snake_case : Any = key.replace('linear_fuse.conv' , 'linear_fuse' )
__snake_case : Tuple = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
__snake_case : List[Any] = key[key.find('linear_c' ) + len('linear_c' )]
__snake_case : int = key.replace(F"linear_c{idx}" , F"linear_c.{int(UpperCAmelCase_ )-1}" )
if "bot_conv" in key:
__snake_case : Tuple = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
__snake_case : Tuple = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
__snake_case : Dict = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
__snake_case : Optional[int] = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
__snake_case : Dict = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
__snake_case : int = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
__snake_case : List[str] = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
__snake_case : Optional[Any] = key.replace('module.last_layer_depth' , 'head.head' )
__snake_case : Union[str, Any] = value
return new_state_dict
def __UpperCAmelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] ) -> str:
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
__snake_case : Union[str, Any] = state_dict.pop(F"glpn.encoder.block.{i}.{j}.attention.self.kv.weight" )
__snake_case : Any = state_dict.pop(F"glpn.encoder.block.{i}.{j}.attention.self.kv.bias" )
# next, add keys and values (in that order) to the state dict
__snake_case : int = kv_weight[
: config.hidden_sizes[i], :
]
__snake_case : List[str] = kv_bias[: config.hidden_sizes[i]]
__snake_case : Dict = kv_weight[
config.hidden_sizes[i] :, :
]
__snake_case : Any = kv_bias[config.hidden_sizes[i] :]
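# A standalone sketch of the key/value split performed in the function above:
# the original checkpoint stores K and V as one fused (2 * hidden, hidden)
# matrix, which is cut in half along the first dimension. The size here is
# illustrative.
import torch

hidden = 4
kv_weight = torch.randn(2 * hidden, hidden)  # fused [K; V] projection
k_weight = kv_weight[:hidden, :]             # first half -> key projection
v_weight = kv_weight[hidden:, :]             # second half -> value projection
assert torch.equal(torch.cat([k_weight, v_weight], dim=0), kv_weight)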
def __UpperCAmelCase ( ) -> Any:
'''simple docstring'''
__snake_case : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__snake_case : Tuple = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
return image
@torch.no_grad()
def __UpperCAmelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=None ) -> Dict:
'''simple docstring'''
__snake_case : Dict = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
__snake_case : Tuple = GLPNImageProcessor()
# prepare image
__snake_case : Optional[Any] = prepare_img()
__snake_case : Any = image_processor(images=UpperCAmelCase_ , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
__snake_case : Dict = torch.load(UpperCAmelCase_ , map_location=torch.device('cpu' ) )
# rename keys
__snake_case : Union[str, Any] = rename_keys(UpperCAmelCase_ )
# key and value matrices need special treatment
read_in_k_v(UpperCAmelCase_ , UpperCAmelCase_ )
# create HuggingFace model and load state dict
__snake_case : Optional[int] = GLPNForDepthEstimation(UpperCAmelCase_ )
model.load_state_dict(UpperCAmelCase_ )
model.eval()
# forward pass
__snake_case : List[Any] = model(UpperCAmelCase_ )
__snake_case : Optional[int] = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
__snake_case : int = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
__snake_case : Union[str, Any] = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(F"Unknown model name: {model_name}" )
__snake_case : Tuple = torch.Size([1, 4_80, 6_40] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase_ , UpperCAmelCase_ ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=UpperCAmelCase_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase_ , UpperCAmelCase_ ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=UpperCAmelCase_ , )
if __name__ == "__main__":
_a : List[Any]= argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
_a : int= parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 192 | 1 |
'''simple docstring'''
def a ( __a , __a = False ) -> bool:
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
'''Warning: upper bound of deterministic test is exceeded. '''
'''Pass allow_probable=True to allow probabilistic test. '''
'''A return value of True indicates a probable prime.''' )
# array bounds provided by analysis
UpperCamelCase__ :str = [
2047,
1373653,
25326001,
3215031751,
2152302898747,
3474749660383,
341550071728321,
1,
3825123056546413051,
1,
1,
318665857834031151167461,
3317044064679887385961981,
]
UpperCamelCase__ :Union[str, Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(__a , 1 ):
if n < _p:
# then we have our last prime to check
UpperCamelCase__ :Tuple = primes[:idx]
break
UpperCamelCase__ , UpperCamelCase__ :Tuple = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
UpperCamelCase__ :Any = False
for r in range(__a ):
UpperCamelCase__ :Tuple = pow(__a , d * 2**r , __a )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
UpperCamelCase__ :Union[str, Any] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
        # and n MUST be composite
return False
return True
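# A compact, self-contained restatement of the same test for reference (a
# sketch, not the snippet's own API): write n - 1 = d * 2**s, then for each
# witness a check a**d mod n and its repeated squarings. The witness list
# matches the small primes used above.
def _miller_rabin_sketch(n: int) -> bool:
    witnesses = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41)
    if n < 2:
        return False
    for p in witnesses:
        if n % p == 0:
            return n == p
    d, s = n - 1, 0
    while d % 2 == 0:
        d, s = d // 2, s + 1
    for a in witnesses:
        x = pow(a, d, n)
        if x in (1, n - 1):
            continue
        for _ in range(s - 1):
            x = pow(x, 2, n)
            if x == n - 1:
                break
        else:
            return False
    return True

assert _miller_rabin_sketch(563) and not _miller_rabin_sketch(561)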
def a ( ) -> None:
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
    test_miller_rabin()
| 189 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
UpperCamelCase__ :List[str] = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
# The dog is cute and lives in the garden house
UpperCamelCase__ :Union[str, Any] = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase__ :Union[str, Any] = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase__ :Any = model(UpperCamelCase_ )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , UpperCamelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1e-3 ) )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
UpperCamelCase__ :List[str] = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
# The dog is cute and lives in the garden house
UpperCamelCase__ :Any = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase__ :List[Any] = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase__ :Optional[int] = model(UpperCamelCase_ )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , UpperCamelCase_ )
# compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1e-3 ) )
| 189 | 1 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {"vocab_file": "vocab.txt"}
__UpperCamelCase : Union[str, Any] = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
__UpperCamelCase : Union[str, Any] = {
"openbmb/cpm-ant-10b": 1_0_2_4,
}
def __UpperCAmelCase ( _snake_case : Any ):
_lowercase = collections.OrderedDict()
with open(_snake_case, "r", encoding="utf-8" ) as reader:
_lowercase = reader.readlines()
for index, token in enumerate(_snake_case ):
_lowercase = token.rstrip("\n" )
_lowercase = index
return vocab
class UpperCAmelCase_ ( lowercase__ ):
def __init__( self : str , _lowercase : Dict , _lowercase : List[str]="<unk>" , _lowercase : List[Any]=2_0_0 ) -> List[str]:
_lowercase = vocab
_lowercase = unk_token
_lowercase = max_input_chars_per_word
def _lowerCamelCase ( self : List[str] , _lowercase : Tuple ) -> List[str]:
_lowercase = list(_lowercase )
if len(_lowercase ) > self.max_input_chars_per_word:
return [self.unk_token]
_lowercase = 0
_lowercase = []
while start < len(_lowercase ):
_lowercase = len(_lowercase )
_lowercase = None
while start < end:
_lowercase = "".join(chars[start:end] )
if substr in self.vocab:
_lowercase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(_lowercase )
_lowercase = end
return sub_tokens
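# Toy illustration of the greedy longest-match loop above, with a hypothetical
# vocabulary: at each step, the longest substring found in the vocab is taken.
def _greedy_wordpiece_sketch(word, vocab, unk="<unk>"):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start and word[start:end] not in vocab:
            end -= 1
        if end == start:  # nothing in the vocab matches here
            return [unk]
        pieces.append(word[start:end])
        start = end
    return pieces

assert _greedy_wordpiece_sketch("unhappiness", {"un", "happi", "ness", "happiness"}) == ["un", "happiness"]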
class UpperCAmelCase_ ( lowercase__ ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["""input_ids""", """attention_mask"""]
snake_case_ = False
def __init__( self : Any , _lowercase : Optional[Any] , _lowercase : List[Any]="<d>" , _lowercase : Optional[int]="</d>" , _lowercase : Union[str, Any]="<s>" , _lowercase : Union[str, Any]="</s>" , _lowercase : List[str]="<pad>" , _lowercase : Dict="<unk>" , _lowercase : Union[str, Any]="</n>" , _lowercase : List[str]="</_>" , _lowercase : Optional[int]="left" , **_lowercase : Tuple , ) -> str:
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=_lowercase , eod_token=_lowercase , bos_token=_lowercase , eos_token=_lowercase , pad_token=_lowercase , unk_token=_lowercase , line_token=_lowercase , space_token=_lowercase , padding_side=_lowercase , **_lowercase , )
_lowercase = bod_token
_lowercase = eod_token
_lowercase = load_vocab(_lowercase )
_lowercase = self.encoder[space_token]
_lowercase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
_lowercase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _lowercase : x[1] ) )
_lowercase = {v: k for k, v in self.encoder.items()}
_lowercase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _lowerCamelCase ( self : str ) -> int:
return self.encoder[self.bod_token]
@property
def _lowerCamelCase ( self : Union[str, Any] ) -> Any:
return self.encoder[self.eod_token]
@property
def _lowerCamelCase ( self : Tuple ) -> List[str]:
return self.encoder["\n"]
@property
def _lowerCamelCase ( self : Union[str, Any] ) -> int:
return len(self.encoder )
def _lowerCamelCase ( self : List[str] ) -> Tuple:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCamelCase ( self : Dict , _lowercase : Union[str, Any] ) -> List[str]:
_lowercase = []
for x in jieba.cut(_lowercase , cut_all=_lowercase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_lowercase ) )
return output_tokens
def _lowerCamelCase ( self : Optional[int] , _lowercase : Union[str, Any] , **_lowercase : Union[str, Any] ) -> Dict:
_lowercase = [i for i in token_ids if i >= 0]
_lowercase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_lowercase , **_lowercase )
def _lowerCamelCase ( self : Optional[Any] , _lowercase : Tuple ) -> Any:
return token in self.encoder
def _lowerCamelCase ( self : Tuple , _lowercase : List[str] ) -> str:
return "".join(_lowercase )
def _lowerCamelCase ( self : Union[str, Any] , _lowercase : Tuple ) -> Tuple:
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def _lowerCamelCase ( self : Union[str, Any] , _lowercase : List[str] ) -> List[str]:
return self.decoder.get(_lowercase , self.unk_token )
def _lowerCamelCase ( self : Optional[int] , _lowercase : str , _lowercase : Optional[str] = None ) -> Tuple[str]:
if os.path.isdir(_lowercase ):
_lowercase = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
_lowercase = (filename_prefix + "-" if filename_prefix else "") + save_directory
_lowercase = 0
if " " in self.encoder:
_lowercase = self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
_lowercase = self.encoder["\n"]
del self.encoder["\n"]
_lowercase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _lowercase : x[1] ) )
with open(_lowercase , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!" )
_lowercase = token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def _lowerCamelCase ( self : List[Any] , _lowercase : List[int] , _lowercase : List[int] = None ) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _lowerCamelCase ( self : int , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase ))
return [1] + ([0] * len(_lowercase )) | 715 | """simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
__UpperCamelCase : List[str] = logging.getLogger(__name__)
__UpperCamelCase : List[Any] = "Hello world! cécé herlolip"
__UpperCamelCase : Any = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def __UpperCAmelCase ( _snake_case : Union[str, Any], _snake_case : str ):
_lowercase = BertAbsConfig(
temp_dir=".", finetune_bert=_snake_case, large=_snake_case, share_emb=_snake_case, use_bert_emb=_snake_case, encoder="bert", max_pos=5_1_2, enc_layers=6, enc_hidden_size=5_1_2, enc_heads=8, enc_ff_size=5_1_2, enc_dropout=0.2, dec_layers=6, dec_hidden_size=7_6_8, dec_heads=8, dec_ff_size=2_0_4_8, dec_dropout=0.2, )
_lowercase = torch.load(_snake_case, lambda _snake_case, _snake_case : storage )
_lowercase = AbsSummarizer(_snake_case, torch.device("cpu" ), _snake_case )
original.eval()
_lowercase = BertAbsSummarizer(_snake_case, torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
_lowercase = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
_lowercase = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_snake_case )) )
_lowercase = torch.tensor(_snake_case ).unsqueeze(0 )
_lowercase = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_snake_case )) )
_lowercase = torch.tensor(_snake_case ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
_lowercase = encoder_input_ids
_lowercase = decoder_input_ids
_lowercase = _lowercase = None
_lowercase = None
_lowercase = _lowercase = None
_lowercase = _lowercase = None
_lowercase = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process, we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
_lowercase = original(_snake_case, _snake_case, _snake_case, _snake_case, _snake_case, _snake_case, _snake_case )[0]
_lowercase = original.generator(_snake_case )
_lowercase = new_model(
_snake_case, _snake_case, _snake_case, _snake_case, _snake_case )[0]
_lowercase = new_model.generator(_snake_case )
_lowercase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_snake_case ) )
_lowercase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_snake_case ) )
_lowercase = torch.allclose(_snake_case, _snake_case, atol=1e-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
    )
| 227 | 0 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
if len(lowerCAmelCase ) < k or k < 0:
raise ValueError("""Invalid Input""" )
_lowerCAmelCase = _lowerCAmelCase = sum(array[:k] )
for i in range(len(lowerCAmelCase ) - k ):
_lowerCAmelCase = current_sum - array[i] + array[i + k]
_lowerCAmelCase = max(lowerCAmelCase , lowerCAmelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
A__ : str =[randint(-10_00, 10_00) for i in range(1_00)]
A__ : Dict =randint(0, 1_10)
print(F"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
| 207 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = np.shape(lowerCAmelCase )
if rows != columns:
_lowerCAmelCase = (
"""'table' has to be of square shaped array but got a """
f"{rows}x{columns} array:\n{table}"
)
raise ValueError(lowerCAmelCase )
_lowerCAmelCase = np.zeros((rows, columns) )
_lowerCAmelCase = np.zeros((rows, columns) )
for i in range(lowerCAmelCase ):
for j in range(lowerCAmelCase ):
_lowerCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(lowerCAmelCase ) )
if upper[j][j] == 0:
raise ArithmeticError("""No LU decomposition exists""" )
_lowerCAmelCase = (table[i][j] - total) / upper[j][j]
_lowerCAmelCase = 1
for j in range(lowerCAmelCase , lowerCAmelCase ):
_lowerCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(lowerCAmelCase ) )
_lowerCAmelCase = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
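# Worked example of Doolittle LU factorisation, consistent with the routine
# above (no pivoting is performed, so the leading minors must be nonsingular):
import numpy as np

A = np.array([[2.0, 3.0], [4.0, 7.0]])
L = np.array([[1.0, 0.0], [2.0, 1.0]])  # unit lower triangular
U = np.array([[2.0, 3.0], [0.0, 1.0]])  # upper triangular
assert np.allclose(L @ U, A)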
| 207 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
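# Minimal sketch of the lazy-import idea used above: nothing heavy is imported
# until an attribute is first accessed. This is a simplification under stated
# assumptions; the real _LazyModule in transformers.utils does considerably
# more bookkeeping (import structure, TYPE_CHECKING, module specs).
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, name_to_module):
        super().__init__(name)
        self._name_to_module = name_to_module  # e.g. {"RobertaConfig": "transformers"}

    def __getattr__(self, attr):
        module = importlib.import_module(self._name_to_module[attr])
        return getattr(module, attr)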
| 712 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A = logging.get_logger(__name__)
A = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ):
"""simple docstring"""
__A = """bit"""
__A = ["""preactivation""", """bottleneck"""]
__A = ["""SAME""", """VALID"""]
def __init__( self , __UpperCamelCase=3 , __UpperCamelCase=64 , __UpperCamelCase=[2_56, 5_12, 10_24, 20_48] , __UpperCamelCase=[3, 4, 6, 3] , __UpperCamelCase="preactivation" , __UpperCamelCase="relu" , __UpperCamelCase=None , __UpperCamelCase=32 , __UpperCamelCase=0.0 , __UpperCamelCase=False , __UpperCamelCase=32 , __UpperCamelCase=1 , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase , ):
"""simple docstring"""
super().__init__(**__UpperCamelCase )
if layer_type not in self.layer_types:
raise ValueError(f"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
snake_case_ = global_padding.upper()
else:
raise ValueError(f"""Padding strategy {global_padding} not supported""" )
snake_case_ = num_channels
snake_case_ = embedding_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = layer_type
snake_case_ = hidden_act
snake_case_ = global_padding
snake_case_ = num_groups
snake_case_ = drop_path_rate
snake_case_ = embedding_dynamic_padding
snake_case_ = output_stride
snake_case_ = width_factor
snake_case_ = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(__UpperCamelCase ) + 1 )]
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(
out_features=__UpperCamelCase , out_indices=__UpperCamelCase , stage_names=self.stage_names )
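# Tiny standalone check of the padding normalisation performed in __init__
# above: a supported strategy is upper-cased, anything else raises. The input
# value is illustrative.
supported_padding = ["SAME", "VALID"]
global_padding = "same"
if global_padding.upper() in supported_padding:
    global_padding = global_padding.upper()
else:
    raise ValueError(f"Padding strategy {global_padding} not supported")
assert global_padding == "SAME"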
| 46 | 0 |
'''simple docstring'''
import numpy
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , _a , _a ):
"""simple docstring"""
a__ = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
a__ = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
a__ = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
a__ = numpy.random.rand(3 , 1 )
# Real output values provided.
a__ = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
a__ = numpy.zeros(output_array.shape )
def lowercase__ ( self ):
"""simple docstring"""
a__ = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
a__ = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
a__ = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def lowercase__ ( self ):
"""simple docstring"""
a__ = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
a__ = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
a__ = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def lowercase__ ( self , _a , _a , _a ):
"""simple docstring"""
for iteration in range(1 , iterations + 1 ):
a__ = self.feedforward()
self.back_propagation()
if give_loss:
a__ = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F'''Iteration {iteration} Loss: {loss}''' )
def lowercase__ ( self , _a ):
"""simple docstring"""
a__ = input_arr
a__ = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
a__ = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
a__ = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowerCAmelCase_ ( a : Union[str, Any] ):
return 1 / (1 + numpy.exp(-value ))
def lowerCAmelCase_ ( a : int ):
return (value) * (1 - (value))
def lowerCAmelCase_ ( ):
a__ = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
        ) , dtype=numpy.float64 , )
# True output values for the given input values.
    a__ = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )
# Calling neural network class.
a__ = TwoHiddenLayerNeuralNetwork(
input_array=UpperCamelCase_ , output_array=UpperCamelCase_ )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=UpperCamelCase_ , iterations=10 , give_loss=UpperCamelCase_ )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )
if __name__ == "__main__":
example()
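# Numerical sanity check of the derivative used in back-propagation above:
# for s = sigmoid(x), ds/dx = s * (1 - s), which a forward difference confirms.
import numpy

x, h = 0.5, 1e-6
s = 1 / (1 + numpy.exp(-x))
numeric = (1 / (1 + numpy.exp(-(x + h))) - s) / h
assert abs(numeric - s * (1 - s)) < 1e-5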
| 394 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
__UpperCAmelCase : str = get_logger(__name__)
__UpperCAmelCase : Optional[Any] = Path(__file__).parent / 'model_card_template.md'
__UpperCAmelCase : Tuple = uuida().hex
__UpperCAmelCase : Optional[int] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
__UpperCAmelCase : List[str] = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
__UpperCAmelCase : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def lowerCamelCase_ ( UpperCamelCase_ = None ):
_a : str = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f"""; torch/{_torch_version}"""
if is_flax_available():
ua += f"""; jax/{_jax_version}"""
ua += f"""; flax/{_flax_version}"""
if is_onnx_available():
ua += f"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
ua += "; " + user_agent
return ua
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None ):
if token is None:
_a : Optional[Any] = HfFolder.get_token()
if organization is None:
_a : Tuple = whoami(UpperCamelCase_ )['''name''']
return f"""{username}/{model_id}"""
else:
return f"""{organization}/{model_id}"""
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ ):
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''' )
if hasattr(UpperCamelCase_ , '''local_rank''' ) and args.local_rank not in [-1, 0]:
return
_a : List[str] = args.hub_token if hasattr(UpperCamelCase_ , '''hub_token''' ) else None
_a : int = get_full_repo_name(UpperCamelCase_ , token=UpperCamelCase_ )
_a : Optional[int] = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
            language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) ,
        template_path=UpperCamelCase_ ,
        model_name=UpperCamelCase_ ,
        repo_name=UpperCamelCase_ ,
        dataset_name=args.dataset_name if hasattr(UpperCamelCase_ , '''dataset_name''' ) else None ,
        learning_rate=args.learning_rate ,
        train_batch_size=args.train_batch_size ,
        eval_batch_size=args.eval_batch_size ,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(UpperCamelCase_ , '''gradient_accumulation_steps''' ) else None
        ) ,
        adam_beta1=args.adam_beta1 if hasattr(UpperCamelCase_ , '''adam_beta1''' ) else None ,
        adam_beta2=args.adam_beta2 if hasattr(UpperCamelCase_ , '''adam_beta2''' ) else None ,
        adam_weight_decay=args.adam_weight_decay if hasattr(UpperCamelCase_ , '''adam_weight_decay''' ) else None ,
        adam_epsilon=args.adam_epsilon if hasattr(UpperCamelCase_ , '''adam_epsilon''' ) else None ,
        lr_scheduler=args.lr_scheduler if hasattr(UpperCamelCase_ , '''lr_scheduler''' ) else None ,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(UpperCamelCase_ , '''lr_warmup_steps''' ) else None ,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(UpperCamelCase_ , '''ema_inv_gamma''' ) else None ,
        ema_power=args.ema_power if hasattr(UpperCamelCase_ , '''ema_power''' ) else None ,
        ema_max_decay=args.ema_max_decay if hasattr(UpperCamelCase_ , '''ema_max_decay''' ) else None ,
        mixed_precision=args.mixed_precision , )
_a : Dict = os.path.join(args.output_dir , '''README.md''' )
model_card.save(UpperCamelCase_ )
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ = None ):
if resolved_file is None or commit_hash is not None:
return commit_hash
_a : Union[str, Any] = str(Path(UpperCamelCase_ ).as_posix() )
_a : str = re.search(R'''snapshots/([^/]+)/''' , UpperCamelCase_ )
if search is None:
return None
_a : str = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(UpperCamelCase_ ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
__UpperCAmelCase : Dict = os.path.expanduser(
os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
__UpperCAmelCase : Optional[Any] = os.path.join(hf_cache_home, 'diffusers')
def lowerCamelCase_ ( UpperCamelCase_ = None , UpperCamelCase_ = None ):
if new_cache_dir is None:
_a : Optional[Any] = DIFFUSERS_CACHE
if old_cache_dir is None:
_a : List[str] = old_diffusers_cache
_a : Dict = Path(UpperCamelCase_ ).expanduser()
_a : Union[str, Any] = Path(UpperCamelCase_ ).expanduser()
for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
_a : str = new_cache_dir / old_blob_path.relative_to(UpperCamelCase_ )
new_blob_path.parent.mkdir(parents=UpperCamelCase_ , exist_ok=UpperCamelCase_ )
os.replace(UpperCamelCase_ , UpperCamelCase_ )
try:
os.symlink(UpperCamelCase_ , UpperCamelCase_ )
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
__UpperCAmelCase : Any = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
__UpperCAmelCase : int = 0
else:
with open(cache_version_file) as f:
try:
__UpperCAmelCase : Union[str, Any] = int(f.read())
except ValueError:
__UpperCAmelCase : Optional[Any] = 0
if cache_version < 1:
__UpperCAmelCase : List[str] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
__UpperCAmelCase : Optional[int] = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'the directory exists and can be written to.'
)
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ = None ):
if variant is not None:
_a : Dict = weights_name.split('''.''' )
_a : List[str] = splits[:-1] + [variant] + splits[-1:]
_a : int = '''.'''.join(UpperCamelCase_ )
return weights_name
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
| 471 | 0 |
"""simple docstring"""
def solution() -> int:
    """Return a * b * c for the Pythagorean triplet with a + b + c = 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
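# A faster sketch of the same search (not part of the original solution): fix a,
# then solve a + b + c = 1000 together with a^2 + b^2 = c^2 for b directly, which
# gives b = 1000 * (1000 - 2a) / (2 * (1000 - a)).
def solution_closed_form() -> int:
    for a in range(1, 999):
        numerator = 1000 * (1000 - 2 * a)
        denominator = 2 * (1000 - a)
        if numerator > 0 and numerator % denominator == 0:
            b = numerator // denominator
            c = 1000 - a - b
            if 0 < a < b < c:
                return a * b * c
    return -1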
if __name__ == "__main__":
print(F"{solution() = }")
| 707 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node

        # Return prev in order to put the head at the end
        self.head = prev
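# A quick usage sketch of the class above; the helper name is hypothetical, and
# the tests below exercise the same operations in much more depth.
def _linked_list_demo() -> None:
    lst = LinkedList()
    for value in (1, 2, 3):
        lst.insert_tail(value)
    assert str(lst) == "1->2->3"
    lst.reverse()
    assert str(lst) == "3->2->1"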
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    # This test uses varying data types for input.
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
| 681 | 0 |
from ..utils import DummyObject, requires_backends
# Dummy placeholders for sentencepiece-backed tokenizer classes: any attempt to
# instantiate one raises an ImportError via requires_backends.
class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
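# Illustrative sketch of what the DummyObject metaclass buys us (the guard below
# is an assumption for demonstration, not part of this module): touching any of
# the placeholders raises an ImportError telling the user to install sentencepiece.
if __name__ == "__main__":
    try:
        A()
    except ImportError as err:
        print(err)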
| 226 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
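# Illustrative invocations (the tokenizer name and data path are placeholder
# assumptions, not values from this script):
#   python save_len_file.py t5-small /path/to/data_dir --consider_target=True
# or, equivalently, calling the function directly:
#   save_len_file("t5-small", "/path/to/data_dir", consider_target=True)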
| 226 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    """Map an original YOSO checkpoint key to the transformers naming scheme."""
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
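# A hypothetical spot-check of the renaming logic above (the helper name and the
# sample key are illustrative; real checkpoints may contain additional patterns):
def _sanity_check_rename():
    assert (
        rename_key("model.transformer_0.mha.W_q.weight")
        == "yoso.encoder.layer.0.attention.self.query.weight"
    )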
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 700 |
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Hubble parameter H(z) for an FLRW universe with the given densities."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
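# A worked example (the helper name is hypothetical): at redshift 0 the densities
# are chosen to sum to one, so the curvature term vanishes, the parenthesized
# factor is exactly 1, and the function should return the Hubble constant itself.
def _worked_example() -> None:
    h0 = 68.3
    assert abs(hubble_parameter(h0, 1e-4, 0.3, 1 - 0.3 - 1e-4, 0) - h0) < 1e-9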
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
    )
| 227 | 0 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
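# An illustrative standalone sketch of what these tests exercise end to end. The
# checkpoint id and sampler settings come from the slow tests below; the helper
# name and the idea of calling it outside a test harness are assumptions.
def _pipeline_usage_sketch():
    unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
    scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
    pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to(torch_device)
    return pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=0, output_type="np").images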
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)
        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents
        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 57 |
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes over odd numbers; returns all primes below limit."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below ceiling that is the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest
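# Hypothetical opt-in check (not part of the original file): the published answer
# for a 1,000,000 ceiling in Project Euler problem 50 is 997651, the sum of 543
# consecutive primes. The double loop takes a few seconds, so this is not run
# automatically.
def _expected_answer_check() -> None:
    assert solution(1_000_000) == 997651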
if __name__ == "__main__":
print(F"""{solution() = }""")
| 350 | 0 |
"""simple docstring"""
def longest_distance(graph: dict) -> None:
    """Print the number of vertices on the longest path in a DAG, using Kahn's topological sort."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
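# A tiny traced example (the helper name is hypothetical): in the chain
# 0 -> 1 -> 2 the longest path visits 3 vertices, so the function prints 3.
def _small_example() -> None:
    longest_distance({0: [1], 1: [2], 2: []})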
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 104 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 104 | 1 |
def catalan(number: int) -> int:
    """Return the number-th Catalan number, using a 1-indexed convention."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number
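# A hypothetical cross-check: with this function's 1-indexed convention,
# catalan(n) returns the (n - 1)-th Catalan number, and the first values of the
# sequence C_0..C_4 are 1, 1, 2, 5, 14.
def _first_values_check() -> None:
    assert [catalan(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]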
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
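# Illustrative smoke test (the helper is hypothetical and assumes the image
# processor constructs with default arguments): the deprecated class still
# works but records the FutureWarning emitted above.
def _deprecation_smoke_test() -> None:
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        OwlViTFeatureExtractor()
        assert any(issubclass(w.category, FutureWarning) for w in caught)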
| 326 | 1 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 61 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 61 | 1 |